diff --git a/src/symbols.c b/src/symbols.c index 8b8bc7ea..3c513518 100644 --- a/src/symbols.c +++ b/src/symbols.c @@ -985,6 +985,7 @@ static void add_top_level_items(GeanyDocument *doc) &(tv_iters.tag_macro), _("Triggers"), "classviewer-macro", &(tv_iters.tag_member), _("Views"), "classviewer-var", &(tv_iters.tag_other), _("Other"), "classviewer-other", + &(tv_iters.tag_variable), _("Variables"), "classviewer-var", NULL); break; } diff --git a/tagmanager/ctags/read.c b/tagmanager/ctags/read.c index 150d2cfa..2c4537f4 100644 --- a/tagmanager/ctags/read.c +++ b/tagmanager/ctags/read.c @@ -497,6 +497,16 @@ extern int fileGetc (void) return c; } +extern int fileSkipToCharacter (int c) +{ + int d; + do + { + d = fileGetc (); + } while (d != EOF && d != c); + return d; +} + /* An alternative interface to fileGetc (). Do not mix use of fileReadLine() * and fileGetc() for the same file. The returned string does not contain * the terminating newline. A NULL return value means that all lines in the diff --git a/tagmanager/ctags/read.h b/tagmanager/ctags/read.h index e3b15b1a..64103189 100644 --- a/tagmanager/ctags/read.h +++ b/tagmanager/ctags/read.h @@ -101,6 +101,7 @@ extern boolean fileOpen (const char *const fileName, const langType language); extern boolean fileEOF (void); extern void fileClose (void); extern int fileGetc (void); +extern int fileSkipToCharacter (int c); extern void fileUngetc (int c); extern const unsigned char *fileReadLine (void); extern char *readLine (vString *const vLine, MIO *const mio); diff --git a/tagmanager/ctags/sql.c b/tagmanager/ctags/sql.c index 518b3621..eec7975b 100644 --- a/tagmanager/ctags/sql.c +++ b/tagmanager/ctags/sql.c @@ -1,16 +1,18 @@ /* -* Copyright (c) 2002-2003, Darren Hiebert -* -* This source code is released for free distribution under the terms of the -* GNU General Public License. -* -* This module contains functions for generating tags for PL/SQL language -* files. -*/ + * $Id$ + * + * Copyright (c) 2002-2003, Darren Hiebert + * + * This source code is released for free distribution under the terms of the + * GNU General Public License. + * + * This module contains functions for generating tags for PL/SQL language + * files. + */ /* -* INCLUDE FILES -*/ + * INCLUDE FILES + */ #include "general.h" /* must always come first */ #include /* to define isalpha () */ @@ -27,1649 +29,2347 @@ #include "vstring.h" /* -* On-line PL/SQL Reference Guide: -* http://info-it.umsystem.edu/oradocs/doc/server/doc/PLS23/toc.htm -* -* Sample PL/SQL code is available from: -* http://www.orafaq.com/faqscrpt.htm#GENPLSQL -*/ + * On-line "Oracle Database PL/SQL Language Reference": + * http://download.oracle.com/docs/cd/B28359_01/appdev.111/b28370/toc.htm + * + * Sample PL/SQL code is available from: + * http://www.orafaq.com/faqscrpt.htm#GENPLSQL + * + * On-line SQL Anywhere Documentation + * http://www.ianywhere.com/developer/product_manuals/sqlanywhere/index.html + */ /* -* MACROS -*/ + * MACROS + */ #define isType(token,t) (boolean) ((token)->type == (t)) #define isKeyword(token,k) (boolean) ((token)->keyword == (k)) /* -* DATA DECLARATIONS -*/ + * DATA DECLARATIONS + */ typedef enum eException { ExceptionNone, ExceptionEOF } exception_t; -/* Used to specify type of keyword. +/* + * Used to specify type of keyword. 
*/ typedef enum eKeywordId { - KEYWORD_NONE = -1, - KEYWORD_is, - KEYWORD_begin, - KEYWORD_body, - KEYWORD_cursor, - KEYWORD_declare, - KEYWORD_end, - KEYWORD_function, - KEYWORD_if, - KEYWORD_loop, - KEYWORD_case, - KEYWORD_for, - KEYWORD_call, - KEYWORD_package, - KEYWORD_pragma, - KEYWORD_procedure, - KEYWORD_record, - KEYWORD_object, - KEYWORD_ref, - KEYWORD_rem, - KEYWORD_return, - KEYWORD_returns, - KEYWORD_subtype, - KEYWORD_table, - KEYWORD_trigger, - KEYWORD_type, - KEYWORD_index, - KEYWORD_event, - KEYWORD_publication, - KEYWORD_service, - KEYWORD_domain, - KEYWORD_datatype, - KEYWORD_result, - KEYWORD_when, - KEYWORD_then, - KEYWORD_variable, - KEYWORD_exception, - KEYWORD_at, - KEYWORD_on, - KEYWORD_primary, - KEYWORD_references, - KEYWORD_unique, - KEYWORD_check, - KEYWORD_constraint, - KEYWORD_foreign, - KEYWORD_ml_table, - KEYWORD_ml_conn, - KEYWORD_local, - KEYWORD_temporary, - KEYWORD_drop, - KEYWORD_view, - KEYWORD_synonym, - KEYWORD_handler + KEYWORD_NONE = -1, + KEYWORD_is, + KEYWORD_begin, + KEYWORD_body, + KEYWORD_cursor, + KEYWORD_declare, + KEYWORD_end, + KEYWORD_function, + KEYWORD_if, + KEYWORD_else, + KEYWORD_elseif, + KEYWORD_endif, + KEYWORD_loop, + KEYWORD_while, + KEYWORD_case, + KEYWORD_for, + KEYWORD_do, + KEYWORD_call, + KEYWORD_package, + KEYWORD_pragma, + KEYWORD_procedure, + KEYWORD_record, + KEYWORD_object, + KEYWORD_ref, + KEYWORD_rem, + KEYWORD_return, + KEYWORD_returns, + KEYWORD_subtype, + KEYWORD_table, + KEYWORD_trigger, + KEYWORD_type, + KEYWORD_index, + KEYWORD_event, + KEYWORD_publication, + KEYWORD_service, + KEYWORD_domain, + KEYWORD_datatype, + KEYWORD_result, + KEYWORD_url, + KEYWORD_internal, + KEYWORD_external, + KEYWORD_when, + KEYWORD_then, + KEYWORD_variable, + KEYWORD_exception, + KEYWORD_at, + KEYWORD_on, + KEYWORD_primary, + KEYWORD_references, + KEYWORD_unique, + KEYWORD_check, + KEYWORD_constraint, + KEYWORD_foreign, + KEYWORD_ml_table, + KEYWORD_ml_table_lang, + KEYWORD_ml_table_dnet, + KEYWORD_ml_table_java, + KEYWORD_ml_table_chk, + KEYWORD_ml_conn, + KEYWORD_ml_conn_lang, + KEYWORD_ml_conn_dnet, + KEYWORD_ml_conn_java, + KEYWORD_ml_conn_chk, + KEYWORD_ml_prop, + KEYWORD_local, + KEYWORD_temporary, + KEYWORD_drop, + KEYWORD_view, + KEYWORD_synonym, + KEYWORD_handler, + KEYWORD_comment, + KEYWORD_create, + KEYWORD_go } keywordId; -/* Used to determine whether keyword is valid for the token language and - * what its ID is. +/* + * Used to determine whether keyword is valid for the token language and + * what its ID is. 
*/ typedef struct sKeywordDesc { - const char *name; - keywordId id; + const char *name; + keywordId id; } keywordDesc; typedef enum eTokenType { - TOKEN_UNDEFINED, - TOKEN_BLOCK_LABEL_BEGIN, - TOKEN_BLOCK_LABEL_END, - TOKEN_CHARACTER, - TOKEN_CLOSE_PAREN, - TOKEN_SEMICOLON, - TOKEN_COMMA, - TOKEN_IDENTIFIER, - TOKEN_KEYWORD, - TOKEN_OPEN_PAREN, - TOKEN_OPERATOR, - TOKEN_OTHER, - TOKEN_STRING, - TOKEN_PERIOD + TOKEN_UNDEFINED, + TOKEN_BLOCK_LABEL_BEGIN, + TOKEN_BLOCK_LABEL_END, + TOKEN_CHARACTER, + TOKEN_CLOSE_PAREN, + TOKEN_COLON, + TOKEN_SEMICOLON, + TOKEN_COMMA, + TOKEN_IDENTIFIER, + TOKEN_KEYWORD, + TOKEN_OPEN_PAREN, + TOKEN_OPERATOR, + TOKEN_OTHER, + TOKEN_STRING, + TOKEN_PERIOD, + TOKEN_OPEN_CURLY, + TOKEN_CLOSE_CURLY, + TOKEN_OPEN_SQUARE, + TOKEN_CLOSE_SQUARE, + TOKEN_TILDE, + TOKEN_FORWARD_SLASH, + TOKEN_EQUAL } tokenType; -typedef struct sTokenInfo { - tokenType type; - keywordId keyword; - vString * string; - vString * scope; - unsigned long lineNumber; - MIOPos filePosition; +typedef struct sTokenInfoSQL { + tokenType type; + keywordId keyword; + vString * string; + vString * scope; + int scopeKind; + int begin_end_nest_lvl; + unsigned long lineNumber; + MIOPos filePosition; } tokenInfo; /* -* DATA DEFINITIONS -*/ + * DATA DEFINITIONS + */ static langType Lang_sql; static jmp_buf Exception; typedef enum { - SQLTAG_CURSOR, - SQLTAG_PROTOTYPE, - SQLTAG_FUNCTION, - SQLTAG_FIELD, - SQLTAG_LOCAL_VARIABLE, - SQLTAG_BLOCK_LABEL, - SQLTAG_PACKAGE, - SQLTAG_PROCEDURE, - SQLTAG_RECORD, - SQLTAG_SUBTYPE, - SQLTAG_TABLE, - SQLTAG_TRIGGER, - SQLTAG_VARIABLE, - SQLTAG_INDEX, - SQLTAG_EVENT, - SQLTAG_PUBLICATION, - SQLTAG_SERVICE, - SQLTAG_DOMAIN, - SQLTAG_VIEW, - SQLTAG_SYNONYM, - SQLTAG_MLTABLE, - SQLTAG_MLCONN, - SQLTAG_COUNT + SQLTAG_CURSOR, + SQLTAG_PROTOTYPE, + SQLTAG_FUNCTION, + SQLTAG_FIELD, + SQLTAG_LOCAL_VARIABLE, + SQLTAG_BLOCK_LABEL, + SQLTAG_PACKAGE, + SQLTAG_PROCEDURE, + SQLTAG_RECORD, + SQLTAG_SUBTYPE, + SQLTAG_TABLE, + SQLTAG_TRIGGER, + SQLTAG_VARIABLE, + SQLTAG_INDEX, + SQLTAG_EVENT, + SQLTAG_PUBLICATION, + SQLTAG_SERVICE, + SQLTAG_DOMAIN, + SQLTAG_VIEW, + SQLTAG_SYNONYM, + SQLTAG_MLTABLE, + SQLTAG_MLCONN, + SQLTAG_MLPROP, + SQLTAG_COUNT } sqlKind; static kindOption SqlKinds [] = { - { TRUE, 'c', "cursor", "cursors" }, - { FALSE, 'd', "prototype", "prototypes" }, - { TRUE, 'f', "function", "functions" }, - { TRUE, 'F', "field", "record fields" }, - { FALSE, 'l', "local", "local variables" }, - { TRUE, 'L', "label", "block label" }, - { TRUE, 'P', "package", "packages" }, - { TRUE, 'n', "namespace", "procedures" }, - { FALSE, 'r', "record", "records" }, - { TRUE, 's', "subtype", "subtypes" }, - { TRUE, 't', "class", "tables" }, - { TRUE, 'T', "macro", "triggers" }, - { TRUE, 'v', "variable", "variables" }, - { TRUE, 'i', "struct", "indexes" }, - { TRUE, 'e', "event", "events" }, - { TRUE, 'U', "publication", "publications" }, - { TRUE, 'R', "service", "services" }, - { TRUE, 'D', "domain", "domains" }, - { TRUE, 'm', "member", "views" }, - { TRUE, 'n', "synonym", "synonyms" }, - { TRUE, 'x', "mltable", "MobiLink Table Scripts" }, - { TRUE, 'y', "mlconn", "MobiLink Conn Scripts" } + { TRUE, 'c', "cursor", "cursors" }, + { FALSE, 'd', "prototype", "prototypes" }, + { TRUE, 'f', "function", "functions" }, + { TRUE, 'F', "field", "record fields" }, + { FALSE, 'l', "local", "local variables" }, + { TRUE, 'L', "label", "block label" }, + { TRUE, 'P', "package", "packages" }, + { TRUE, 'n', "namespace", "procedures" }, + { FALSE, 'r', "record", "records" }, + { TRUE, 's', "subtype", 
"subtypes" }, + { TRUE, 't', "class", "tables" }, + { TRUE, 'T', "macro", "triggers" }, + { TRUE, 'v', "variable", "variables" }, + { TRUE, 'i', "struct", "indexes" }, + { TRUE, 'e', "event", "events" }, + { TRUE, 'U', "publication", "publications" }, + { TRUE, 'R', "service", "services" }, + { TRUE, 'D', "domain", "domains" }, + { TRUE, 'm', "member", "views" }, + { TRUE, 'n', "synonym", "synonyms" }, + { TRUE, 'x', "mltable", "MobiLink Table Scripts" }, + { TRUE, 'y', "mlconn", "MobiLink Conn Scripts" }, + { TRUE, 'z', "mlprop", "MobiLink Properties " } }; static const keywordDesc SqlKeywordTable [] = { - /* keyword keyword ID */ - { "as", KEYWORD_is }, - { "begin", KEYWORD_begin }, - { "body", KEYWORD_body }, - { "cursor", KEYWORD_cursor }, - { "declare", KEYWORD_declare }, - { "end", KEYWORD_end }, - { "function", KEYWORD_function }, - { "if", KEYWORD_if }, - { "is", KEYWORD_is }, - { "loop", KEYWORD_loop }, - { "case", KEYWORD_case }, - { "for", KEYWORD_for }, - { "call", KEYWORD_call }, - { "package", KEYWORD_package }, - { "pragma", KEYWORD_pragma }, - { "procedure", KEYWORD_procedure }, - { "record", KEYWORD_record }, - { "object", KEYWORD_object }, - { "ref", KEYWORD_ref }, - { "rem", KEYWORD_rem }, - { "return", KEYWORD_return }, - { "returns", KEYWORD_returns }, - { "subtype", KEYWORD_subtype }, - { "table", KEYWORD_table }, - { "trigger", KEYWORD_trigger }, - { "type", KEYWORD_type }, - { "index", KEYWORD_index }, - { "event", KEYWORD_event }, - { "publication", KEYWORD_publication }, - { "service", KEYWORD_service }, - { "domain", KEYWORD_domain }, - { "datatype", KEYWORD_datatype }, - { "result", KEYWORD_result }, - { "when", KEYWORD_when }, - { "then", KEYWORD_then }, - { "variable", KEYWORD_variable }, - { "exception", KEYWORD_exception }, - { "at", KEYWORD_at }, - { "on", KEYWORD_on }, - { "primary", KEYWORD_primary }, - { "references", KEYWORD_references }, - { "unique", KEYWORD_unique }, - { "check", KEYWORD_check }, - { "constraint", KEYWORD_constraint }, - { "foreign", KEYWORD_foreign }, - { "ml_add_table_script", KEYWORD_ml_table }, - { "ml_add_connection_script", KEYWORD_ml_conn }, - { "local", KEYWORD_local }, - { "temporary", KEYWORD_temporary }, - { "drop", KEYWORD_drop }, - { "view", KEYWORD_view }, - { "synonym", KEYWORD_synonym }, - { "handler", KEYWORD_handler } + /* keyword keyword ID */ + { "as", KEYWORD_is }, + { "is", KEYWORD_is }, + { "begin", KEYWORD_begin }, + { "body", KEYWORD_body }, + { "cursor", KEYWORD_cursor }, + { "declare", KEYWORD_declare }, + { "end", KEYWORD_end }, + { "function", KEYWORD_function }, + { "if", KEYWORD_if }, + { "else", KEYWORD_else }, + { "elseif", KEYWORD_elseif }, + { "endif", KEYWORD_endif }, + { "loop", KEYWORD_loop }, + { "while", KEYWORD_while }, + { "case", KEYWORD_case }, + { "for", KEYWORD_for }, + { "do", KEYWORD_do }, + { "call", KEYWORD_call }, + { "package", KEYWORD_package }, + { "pragma", KEYWORD_pragma }, + { "procedure", KEYWORD_procedure }, + { "record", KEYWORD_record }, + { "object", KEYWORD_object }, + { "ref", KEYWORD_ref }, + { "rem", KEYWORD_rem }, + { "return", KEYWORD_return }, + { "returns", KEYWORD_returns }, + { "subtype", KEYWORD_subtype }, + { "table", KEYWORD_table }, + { "trigger", KEYWORD_trigger }, + { "type", KEYWORD_type }, + { "index", KEYWORD_index }, + { "event", KEYWORD_event }, + { "publication", KEYWORD_publication }, + { "service", KEYWORD_service }, + { "domain", KEYWORD_domain }, + { "datatype", KEYWORD_datatype }, + { "result", KEYWORD_result }, + { "url", KEYWORD_url }, + { 
"internal", KEYWORD_internal }, + { "external", KEYWORD_external }, + { "when", KEYWORD_when }, + { "then", KEYWORD_then }, + { "variable", KEYWORD_variable }, + { "exception", KEYWORD_exception }, + { "at", KEYWORD_at }, + { "on", KEYWORD_on }, + { "primary", KEYWORD_primary }, + { "references", KEYWORD_references }, + { "unique", KEYWORD_unique }, + { "check", KEYWORD_check }, + { "constraint", KEYWORD_constraint }, + { "foreign", KEYWORD_foreign }, + { "ml_add_table_script", KEYWORD_ml_table }, + { "ml_add_lang_table_script", KEYWORD_ml_table_lang }, + { "ml_add_dnet_table_script", KEYWORD_ml_table_dnet }, + { "ml_add_java_table_script", KEYWORD_ml_table_java }, + { "ml_add_lang_table_script_chk", KEYWORD_ml_table_chk }, + { "ml_add_connection_script", KEYWORD_ml_conn }, + { "ml_add_lang_connection_script", KEYWORD_ml_conn_lang }, + { "ml_add_dnet_connection_script", KEYWORD_ml_conn_dnet }, + { "ml_add_java_connection_script", KEYWORD_ml_conn_java }, + { "ml_add_lang_conn_script_chk", KEYWORD_ml_conn_chk }, + { "ml_add_property", KEYWORD_ml_prop }, + { "local", KEYWORD_local }, + { "temporary", KEYWORD_temporary }, + { "drop", KEYWORD_drop }, + { "view", KEYWORD_view }, + { "synonym", KEYWORD_synonym }, + { "handler", KEYWORD_handler }, + { "comment", KEYWORD_comment }, + { "create", KEYWORD_create }, + { "go", KEYWORD_go } }; /* -* FUNCTION DECLARATIONS -*/ + * FUNCTION DECLARATIONS + */ +/* Recursive calls */ static void parseBlock (tokenInfo *const token, const boolean local); -static void makeConstTag (tokenInfo *const token, const sqlKind kind); +static void parseDeclare (tokenInfo *const token, const boolean local); +static void parseKeywords (tokenInfo *const token); +static void parseSqlFile (tokenInfo *const token); /* -* DEBUG function -*/ - -static void dispToken (tokenInfo *const token, const char * location) -{ -#ifdef DEBUG - if ( isKeyword(token, KEYWORD_NONE) ) - { - if ( isType(token, TOKEN_IDENTIFIER) || isType(token, TOKEN_STRING) ) - { - printf( "\n%s: token string t:%s s:%s l:%lu p:%d\n" - , location - , vStringValue(token->string) - , vStringValue(token->scope) - , token->lineNumber - , token->bufferPosition - ); - } else { - printf( "\n%s: token t:%d s:%s l:%lu p:%d\n" - , location - , token->type - , vStringValue(token->scope) - , token->lineNumber - , token->bufferPosition - ); - } - } else { - printf( "\n%s: keyword:%s k:%d s:%s l:%lu p:%d\n" - , location - , vStringValue(token->string) - , token->keyword - , vStringValue(token->scope) - , token->lineNumber - , token->bufferPosition - ); - } -#endif -} - -/* -* FUNCTION DEFINITIONS -*/ + * FUNCTION DEFINITIONS + */ static boolean isIdentChar1 (const int c) { - /* Other databases are less restrictive on the first character of - * an identifier. - * isIdentChar1 is used to identify the first character of an - * identifier, so we are removing some restrictions. */ - return (boolean) - (isalpha (c) || c == '@' || c == '_' ); + /* + * Other databases are less restrictive on the first character of + * an identifier. + * isIdentChar1 is used to identify the first character of an + * identifier, so we are removing some restrictions. 
+ */ + return (boolean) + (isalpha (c) || c == '@' || c == '_' ); } static boolean isIdentChar (const int c) { - return (boolean) - (isalpha (c) || isdigit (c) || c == '$' || - c == '@' || c == '_' || c == '#'); + return (boolean) + (isalpha (c) || isdigit (c) || c == '$' || + c == '@' || c == '_' || c == '#'); +} + +static boolean isCmdTerm (tokenInfo *const token) +{ + DebugStatement ( + debugPrintf (DEBUG_PARSE + , "\n isCmdTerm: token same tt:%d tk:%d\n" + , token->type + , token->keyword + ); + ); + + /* + * Based on the various customer sites I have been at + * the most common command delimiters are + * ; + * ~ + * / + * go + * This routine will check for any of these, more + * can easily be added by modifying readToken and + * either adding the character to: + * enum eTokenType + * enum eTokenType + */ + return ( isType (token, TOKEN_SEMICOLON) || + isType (token, TOKEN_TILDE) || + isType (token, TOKEN_FORWARD_SLASH) || + isKeyword (token, KEYWORD_go) + ); +} + +static boolean isMatchedEnd(tokenInfo *const token, int nest_lvl) +{ + boolean terminated = FALSE; + /* + * Since different forms of SQL allow the use of + * BEGIN + * ... + * END + * blocks, some statements may not be terminated using + * the standard delimiters: + * ; + * ~ + * / + * go + * This routine will check to see if we encounter and END + * for the matching nest level of BEGIN ... END statements. + * If we find one, then we can assume, the statement was terminated + * since we have fallen through to the END statement of the BEGIN + * block. + */ + if ( nest_lvl > 0 && isKeyword (token, KEYWORD_end) ) + { + if ( token->begin_end_nest_lvl == nest_lvl ) + terminated = TRUE; + } + + return terminated; } static void buildSqlKeywordHash (void) { - const size_t count = sizeof (SqlKeywordTable) / - sizeof (SqlKeywordTable [0]); - size_t i; - for (i = 0 ; i < count ; ++i) - { - const keywordDesc* const p = &SqlKeywordTable [i]; - addKeyword (p->name, Lang_sql, (int) p->id); - } + const size_t count = sizeof (SqlKeywordTable) / + sizeof (SqlKeywordTable [0]); + size_t i; + for (i = 0 ; i < count ; ++i) + { + const keywordDesc* const p = &SqlKeywordTable [i]; + addKeyword (p->name, Lang_sql, (int) p->id); + } } static tokenInfo *newToken (void) { - tokenInfo *const token = xMalloc(1, tokenInfo); + tokenInfo *const token = xMalloc (1, tokenInfo); - token->type = TOKEN_UNDEFINED; - token->keyword = KEYWORD_NONE; - token->string = vStringNew (); - token->scope = vStringNew (); + token->type = TOKEN_UNDEFINED; + token->keyword = KEYWORD_NONE; + token->string = vStringNew (); + token->scope = vStringNew (); + token->scopeKind = SQLTAG_COUNT; + token->begin_end_nest_lvl = 0; + token->lineNumber = getSourceLineNumber (); + token->filePosition = getInputFilePosition (); - return token; + return token; } static void deleteToken (tokenInfo *const token) { - vStringDelete (token->string); - eFree (token); + vStringDelete (token->string); + vStringDelete (token->scope); + eFree (token); +} + +static int analyzeToken (vString *const name, langType language) +{ + vString *keyword = vStringNew (); + int result; + vStringCopyToLower (keyword, name); + result = lookupKeyword (vStringValue (keyword), language); + vStringDelete (keyword); + return result; } /* -* Tag generation functions -*/ + * Tag generation functions + */ static void makeSqlTag (tokenInfo *const token, const sqlKind kind) { - vString * fulltag; + if (SqlKinds [kind].enabled) + { + const char *const name = vStringValue (token->string); + tagEntryInfo e; + initTagEntry (&e, 
name); - if (SqlKinds [kind].enabled) - { - /* - * If a scope has been added to the token, change the token - * string to include the scope when making the tag. - */ - if ( vStringLength(token->scope) > 0 ) - { - fulltag = vStringNew (); - vStringCopy(fulltag, token->scope); - vStringCatS (fulltag, "."); - vStringCatS (fulltag, vStringValue(token->string)); - vStringTerminate(fulltag); - vStringCopy(token->string, fulltag); - vStringDelete (fulltag); - } - makeConstTag (token, kind); - } -} + e.lineNumber = token->lineNumber; + e.filePosition = token->filePosition; + e.kindName = SqlKinds [kind].name; + e.kind = SqlKinds [kind].letter; -static void makeConstTag (tokenInfo *const token, const sqlKind kind) -{ - if (SqlKinds [kind].enabled) - { - const char *const name = vStringValue (token->string); - tagEntryInfo e; - initTagEntry (&e, name); + if (vStringLength (token->scope) > 0) + { + Assert (token->scopeKind < SQLTAG_COUNT); + e.extensionFields.scope[0] = SqlKinds [token->scopeKind].name; + e.extensionFields.scope[1] = vStringValue (token->scope); + } - e.lineNumber = token->lineNumber; - e.filePosition = token->filePosition; - e.kindName = SqlKinds [kind].name; - e.kind = SqlKinds [kind].letter; - - makeTagEntry (&e); - } + makeTagEntry (&e); + } } /* -* Parsing functions -*/ - -static int skipToCharacter (const int c) -{ - int d; - do - { - d = fileGetc (); - } while (d != EOF && d != c); - return d; -} + * Parsing functions + */ static void parseString (vString *const string, const int delimiter) { - boolean end = FALSE; - int c; - while (! end) - { - c = fileGetc (); -/* printf( "\nps: %c\n", c ); */ - if (c == EOF) - end = TRUE; - else if (c == delimiter) - end = TRUE; - else - vStringPut (string, c); - } - vStringTerminate (string); + boolean end = FALSE; + while (! end) + { + int c = fileGetc (); + if (c == EOF) + end = TRUE; + /* + else if (c == '\\') + { + c = fileGetc(); // This maybe a ' or ". // + vStringPut(string, c); + } + */ + else if (c == delimiter) + end = TRUE; + else + vStringPut (string, c); + } + vStringTerminate (string); } -/* Read a C identifier beginning with "firstChar" and places it into "name". - */ +/* Read a C identifier beginning with "firstChar" and places it into "name". +*/ static void parseIdentifier (vString *const string, const int firstChar) { - int c = firstChar; - Assert (isIdentChar1 (c)); - do - { - vStringPut (string, c); - c = fileGetc (); - } while (isIdentChar (c)); - vStringTerminate (string); - if (!isspace (c)) - fileUngetc (c); /* unget non-identifier character */ -} - -static keywordId analyzeToken (vString *const name) -{ - static vString *keyword = NULL; - if (keyword == NULL) - keyword = vStringNew (); - vStringCopyToLower (keyword, name); - return (keywordId) lookupKeyword (vStringValue (keyword), Lang_sql); + int c = firstChar; + Assert (isIdentChar1 (c)); + do + { + vStringPut (string, c); + c = fileGetc (); + } while (isIdentChar (c)); + vStringTerminate (string); + if (!isspace (c)) + fileUngetc (c); /* unget non-identifier character */ } static void readToken (tokenInfo *const token) { - int c; + int c; - token->type = TOKEN_UNDEFINED; - token->keyword = KEYWORD_NONE; - vStringClear (token->string); + token->type = TOKEN_UNDEFINED; + token->keyword = KEYWORD_NONE; + vStringClear (token->string); getNextChar: - do - { - c = fileGetc (); -/* printf( "\nrtc: %c\n", c ); */ - /* - * Added " to the list of ignores, not sure what this - * might break but it gets by this issue: - * create table "t1" (...) 
- */ - } - while (c == '\t' || c == ' ' || c == '\n'); - - switch (c) - { - case EOF: longjmp (Exception, (int)ExceptionEOF); break; - case '(': token->type = TOKEN_OPEN_PAREN; break; - case ')': token->type = TOKEN_CLOSE_PAREN; break; - case ';': token->type = TOKEN_SEMICOLON; break; - case '.': token->type = TOKEN_PERIOD; break; - case ',': token->type = TOKEN_COMMA; break; - - case '\'': - case '"': - token->type = TOKEN_STRING; - parseString (token->string, c); - token->lineNumber = getSourceLineNumber (); - token->filePosition = getInputFilePosition (); - break; - - case '-': - c = fileGetc (); - if (c == '-') /* is this the start of a comment? */ - { - skipToCharacter ('\n'); - goto getNextChar; - } - else - { - if (!isspace (c)) - fileUngetc (c); - token->type = TOKEN_OPERATOR; - } - break; - - case '<': - case '>': + do { - const int initial = c; - int d = fileGetc (); - if (d == initial) - { - if (initial == '<') - token->type = TOKEN_BLOCK_LABEL_BEGIN; - else - token->type = TOKEN_BLOCK_LABEL_END; - } - else - { - fileUngetc (d); - token->type = TOKEN_UNDEFINED; - } - break; - } - - case '/': - { - int d = fileGetc (); - if (d != '*') /* is this the start of a comment? */ - fileUngetc (d); - else - { - do - { - skipToCharacter ('*'); - c = fileGetc (); - if (c == '/') - break; - else - fileUngetc (c); - } while (c != EOF && c != '\0'); - goto getNextChar; - } - break; - } - - default: - if (! isIdentChar1 (c)) - token->type = TOKEN_UNDEFINED; - else - { - parseIdentifier (token->string, c); - token->lineNumber = getSourceLineNumber (); + c = fileGetc (); + token->lineNumber = getSourceLineNumber (); token->filePosition = getInputFilePosition (); - token->keyword = analyzeToken (token->string); - if (isKeyword (token, KEYWORD_rem)) - { - vStringClear (token->string); - skipToCharacter ('\n'); - goto getNextChar; - } - else if (isKeyword (token, KEYWORD_NONE)) - token->type = TOKEN_IDENTIFIER; - else - token->type = TOKEN_KEYWORD; - } - break; - } - /*dispToken(token, "rte");*/ + /* + * Added " to the list of ignores, not sure what this + * might break but it gets by this issue: + * create table "t1" (...) + * + * Darren, the code passes all my tests for both + * Oracle and SQL Anywhere, but maybe you can tell me + * what this may effect. + */ + } + while (c == '\t' || c == ' ' || c == '\n'); + + switch (c) + { + case EOF: longjmp (Exception, (int)ExceptionEOF); break; + case '(': token->type = TOKEN_OPEN_PAREN; break; + case ')': token->type = TOKEN_CLOSE_PAREN; break; + case ':': token->type = TOKEN_COLON; break; + case ';': token->type = TOKEN_SEMICOLON; break; + case '.': token->type = TOKEN_PERIOD; break; + case ',': token->type = TOKEN_COMMA; break; + case '{': token->type = TOKEN_OPEN_CURLY; break; + case '}': token->type = TOKEN_CLOSE_CURLY; break; + case '~': token->type = TOKEN_TILDE; break; + case '[': token->type = TOKEN_OPEN_SQUARE; break; + case ']': token->type = TOKEN_CLOSE_SQUARE; break; + case '=': token->type = TOKEN_EQUAL; break; + + case '\'': + case '"': + token->type = TOKEN_STRING; + parseString (token->string, c); + token->lineNumber = getSourceLineNumber (); + token->filePosition = getInputFilePosition (); + break; + + case '-': + c = fileGetc (); + if (c == '-') /* -- is this the start of a comment? 
*/ + { + fileSkipToCharacter ('\n'); + goto getNextChar; + } + else + { + if (!isspace (c)) + fileUngetc (c); + token->type = TOKEN_OPERATOR; + } + break; + + case '<': + case '>': + { + const int initial = c; + int d = fileGetc (); + if (d == initial) + { + if (initial == '<') + token->type = TOKEN_BLOCK_LABEL_BEGIN; + else + token->type = TOKEN_BLOCK_LABEL_END; + } + else + { + fileUngetc (d); + token->type = TOKEN_UNDEFINED; + } + break; + } + + case '\\': + c = fileGetc (); + if (c != '\\' && c != '"' && c != '\'' && !isspace (c)) + fileUngetc (c); + token->type = TOKEN_CHARACTER; + token->lineNumber = getSourceLineNumber (); + token->filePosition = getInputFilePosition (); + break; + + case '/': + { + int d = fileGetc (); + if ( (d != '*') && /* is this the start of a comment? */ + (d != '/') ) /* is a one line comment? */ + { + token->type = TOKEN_FORWARD_SLASH; + fileUngetc (d); + } + else + { + if (d == '*') + { + do + { + fileSkipToCharacter ('*'); + c = fileGetc (); + if (c == '/') + break; + else + fileUngetc (c); + } while (c != EOF && c != '\0'); + goto getNextChar; + } + else if (d == '/') /* is this the start of a comment? */ + { + fileSkipToCharacter ('\n'); + goto getNextChar; + } + } + break; + } + + default: + if (! isIdentChar1 (c)) + token->type = TOKEN_UNDEFINED; + else + { + parseIdentifier (token->string, c); + token->lineNumber = getSourceLineNumber (); + token->filePosition = getInputFilePosition (); + token->keyword = analyzeToken (token->string, Lang_sql); + if (isKeyword (token, KEYWORD_rem)) + { + vStringClear (token->string); + fileSkipToCharacter ('\n'); + goto getNextChar; + } + else if (isKeyword (token, KEYWORD_NONE)) + token->type = TOKEN_IDENTIFIER; + else + token->type = TOKEN_KEYWORD; + } + break; + } } /* -* Token parsing functions -*/ + * reads an indentifier, possibly quoted: + * identifier + * "identifier" + * [identifier] + */ +static void readIdentifier (tokenInfo *const token) +{ + readToken (token); + if (isType (token, TOKEN_OPEN_SQUARE)) + { + tokenInfo *const close_square = newToken (); -/* - unused - I don't know (enrico) -static void addContext (tokenInfo* const parent, const tokenInfo* const child) -{ - if (vStringLength (parent->string) > 0) - { - vStringCatS (parent->string, "."); - } - vStringCatS (parent->string, vStringValue(child->string)); - vStringTerminate(parent->string); -} -*/ -static void addToScope (tokenInfo* const token, vString* const extra) -{ - if (vStringLength (token->scope) > 0) - { - vStringCatS (token->scope, "."); - } - vStringCatS (token->scope, vStringValue(extra)); - vStringTerminate(token->scope); + readToken (token); + /* eat close swuare */ + readToken (close_square); + deleteToken (close_square); + } } /* -* Scanning functions -*/ + * Token parsing functions + */ + +/* + * static void addContext (tokenInfo* const parent, const tokenInfo* const child) + * { + * if (vStringLength (parent->string) > 0) + * { + * vStringCatS (parent->string, "."); + * } + * vStringCatS (parent->string, vStringValue(child->string)); + * vStringTerminate(parent->string); + * } + */ + +static void addToScope (tokenInfo* const token, vString* const extra, sqlKind kind) +{ + if (vStringLength (token->scope) > 0) + { + vStringCatS (token->scope, "."); + } + vStringCatS (token->scope, vStringValue(extra)); + vStringTerminate(token->scope); + token->scopeKind = kind; +} + +/* + * Scanning functions + */ static void findToken (tokenInfo *const token, const tokenType type) { - while (! 
isType (token, type)) - { - readToken (token); - } + while (! isType (token, type)) + { + readToken (token); + } +} + +static void findCmdTerm (tokenInfo *const token, const boolean check_first) +{ + int begin_end_nest_lvl = token->begin_end_nest_lvl; + + if ( check_first ) + { + if ( isCmdTerm(token) ) + return; + } + do + { + readToken (token); + } while ( !isCmdTerm(token) && !isMatchedEnd(token, begin_end_nest_lvl) ); +} + +static void skipToMatched(tokenInfo *const token) +{ + int nest_level = 0; + tokenType open_token; + tokenType close_token; + + switch (token->type) + { + case TOKEN_OPEN_PAREN: + open_token = TOKEN_OPEN_PAREN; + close_token = TOKEN_CLOSE_PAREN; + break; + case TOKEN_OPEN_CURLY: + open_token = TOKEN_OPEN_CURLY; + close_token = TOKEN_CLOSE_CURLY; + break; + case TOKEN_OPEN_SQUARE: + open_token = TOKEN_OPEN_SQUARE; + close_token = TOKEN_CLOSE_SQUARE; + break; + default: + return; + } + + /* + * This routine will skip to a matching closing token. + * It will also handle nested tokens like the (, ) below. + * ( name varchar(30), text binary(10) ) + */ + + if (isType (token, open_token)) + { + nest_level++; + while (! (isType (token, close_token) && (nest_level == 0))) + { + readToken (token); + if (isType (token, open_token)) + { + nest_level++; + } + if (isType (token, close_token)) + { + if (nest_level > 0) + { + nest_level--; + } + } + } + readToken (token); + } +} + +static void copyToken (tokenInfo *const dest, tokenInfo *const src) +{ + dest->lineNumber = src->lineNumber; + dest->filePosition = src->filePosition; + dest->type = src->type; + dest->keyword = src->keyword; + vStringCopy(dest->string, src->string); + vStringCopy(dest->scope, src->scope); + dest->scopeKind = src->scopeKind; } static void skipArgumentList (tokenInfo *const token) { - int nest_level = 0; + /* + * Other databases can have arguments with fully declared + * datatypes: + * ( name varchar(30), text binary(10) ) + * So we must check for nested open and closing parantheses + */ - /* - * Other databases can have arguments with fully declared - * datatypes: - * ( name varchar(30), text binary(10) ) - * So we must check for nested open and closing parantheses - */ - - if (isType (token, TOKEN_OPEN_PAREN)) /* arguments? */ - { - nest_level++; - /*findToken (token, TOKEN_CLOSE_PAREN);*/ - while (! (isType (token, TOKEN_CLOSE_PAREN) && (nest_level == 0))) - { - readToken (token); - if (isType (token, TOKEN_OPEN_PAREN)) - { - nest_level++; - } - if (isType (token, TOKEN_CLOSE_PAREN)) - { - if (nest_level > 0) - { - nest_level--; - } - } - } /*while*/ - readToken (token); - } + if (isType (token, TOKEN_OPEN_PAREN)) /* arguments? */ + { + skipToMatched (token); + } } static void parseSubProgram (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); + vString * saveScope = vStringNew (); + sqlKind saveScopeKind; - /* - * Prototype: - * FUNCTION func_name RETURN integer; - * PROCEDURE proc_name( parameters ); - * Procedure - * FUNCTION GET_ML_USERNAME RETURN VARCHAR2 - * IS - * BEGIN - * RETURN v_sync_user_id; - * END GET_ML_USERNAME; - * - * PROCEDURE proc_name( parameters ) - * IS - * BEGIN - * END; - * CREATE PROCEDURE proc_name( parameters ) - * EXTERNAL NAME ... 
; - * CREATE PROCEDURE proc_name( parameters ) - * BEGIN - * END; - * - * CREATE FUNCTION f_GetClassName( - * IN @object VARCHAR(128) - * ,IN @code VARCHAR(128) - * ) - * RETURNS VARCHAR(200) - * DETERMINISTIC - * BEGIN - * - * IF( @object = 'user_state' ) THEN - * SET something = something; - * END IF; - * - * RETURN @name; - * END; - */ - const sqlKind kind = isKeyword (token, KEYWORD_function) ? - SQLTAG_FUNCTION : SQLTAG_PROCEDURE; - Assert (isKeyword (token, KEYWORD_function) || - isKeyword (token, KEYWORD_procedure)); - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } - skipArgumentList (token); + /* + * This must handle both prototypes and the body of + * the procedures. + * + * Prototype: + * FUNCTION func_name RETURN integer; + * PROCEDURE proc_name( parameters ); + * Procedure + * FUNCTION GET_ML_USERNAME RETURN VARCHAR2 + * IS + * BEGIN + * RETURN v_sync_user_id; + * END GET_ML_USERNAME; + * + * PROCEDURE proc_name( parameters ) + * IS + * BEGIN + * END; + * CREATE PROCEDURE proc_name( parameters ) + * EXTERNAL NAME ... ; + * CREATE PROCEDURE proc_name( parameters ) + * BEGIN + * END; + * + * CREATE FUNCTION f_GetClassName( + * IN @object VARCHAR(128) + * ,IN @code VARCHAR(128) + * ) + * RETURNS VARCHAR(200) + * DETERMINISTIC + * BEGIN + * + * IF( @object = 'user_state' ) THEN + * SET something = something; + * END IF; + * + * RETURN @name; + * END; + * + * Note, a Package adds scope to the items within. + * create or replace package demo_pkg is + * test_var number; + * function test_func return varchar2; + * function more.test_func2 return varchar2; + * end demo_pkg; + * So the tags generated here, contain the package name: + * demo_pkg.test_var + * demo_pkg.test_func + * demo_pkg.more.test_func2 + */ + const sqlKind kind = isKeyword (token, KEYWORD_function) ? + SQLTAG_FUNCTION : SQLTAG_PROCEDURE; + Assert (isKeyword (token, KEYWORD_function) || + isKeyword (token, KEYWORD_procedure)); - if (kind == SQLTAG_FUNCTION) - { - if (isKeyword (token, KEYWORD_return)) - { - /* Read RETURN */ - readToken (token); - /* Read datatype */ - readToken (token); - } - } - if( isType (token, TOKEN_SEMICOLON) ) - { - makeSqlTag (name, SQLTAG_PROTOTYPE); - } else { - while (!(isKeyword (token, KEYWORD_is) || - isKeyword (token, KEYWORD_begin) || - isType (token, TOKEN_SEMICOLON) - ) - ) - readToken (token); /* read return type */ - if (isKeyword (token, KEYWORD_is) || - isKeyword (token, KEYWORD_begin) ) - { - addToScope(token, name->string); - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) - makeSqlTag (name, kind); + vStringCopy(saveScope, token->scope); + saveScopeKind = token->scopeKind; + readToken (token); + copyToken (name, token); + readToken (token); - /*dispToken(name, "SubProgram: parseBlock name");*/ - /*dispToken(token, "SubProgram: parseBlock token");*/ - parseBlock (token, TRUE); - vStringClear (token->scope); - } - } - deleteToken (name); + if (isType (token, TOKEN_PERIOD)) + { + /* + * If this is an Oracle package, then the token->scope should + * already be set. If this is the case, also add this value to the + * scope. + * If this is not an Oracle package, chances are the scope should be + * blank and the value just read is the OWNER or CREATOR of the + * function and should not be considered part of the scope. 
+ */ + if ( vStringLength(saveScope) > 0 ) + { + addToScope(token, name->string, kind); + } + readToken (token); + copyToken (name, token); + readToken (token); + } + if (isType (token, TOKEN_OPEN_PAREN)) + { + /* Reads to the next token after the TOKEN_CLOSE_PAREN */ + skipArgumentList(token); + } + + if (kind == SQLTAG_FUNCTION) + { + if (isKeyword (token, KEYWORD_return) || isKeyword (token, KEYWORD_returns)) + { + /* Read datatype */ + readToken (token); + /* + * Read token after which could be the + * command terminator if a prototype + * or an open parantheses + */ + readToken (token); + if (isType (token, TOKEN_OPEN_PAREN)) + { + /* Reads to the next token after the TOKEN_CLOSE_PAREN */ + skipArgumentList(token); + } + } + } + if( isCmdTerm (token) ) + { + makeSqlTag (name, SQLTAG_PROTOTYPE); + } + else + { + while (!(isKeyword (token, KEYWORD_is) || + isKeyword (token, KEYWORD_begin) || + isKeyword (token, KEYWORD_at) || + isKeyword (token, KEYWORD_internal) || + isKeyword (token, KEYWORD_external) || + isKeyword (token, KEYWORD_url) || + isType (token, TOKEN_EQUAL) || + isCmdTerm (token) + ) + ) + { + if ( isKeyword (token, KEYWORD_result) ) + { + readToken (token); + if (isType (token, TOKEN_OPEN_PAREN)) + { + /* Reads to the next token after the TOKEN_CLOSE_PAREN */ + skipArgumentList(token); + } + } else { + readToken (token); + } + } + if (isKeyword (token, KEYWORD_at) || + isKeyword (token, KEYWORD_url) || + isKeyword (token, KEYWORD_internal) || + isKeyword (token, KEYWORD_external) ) + { + addToScope(token, name->string, kind); + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING) || + !isKeyword (token, KEYWORD_NONE) + ) + makeSqlTag (name, kind); + + vStringClear (token->scope); + token->scopeKind = SQLTAG_COUNT; + } + if ( isType (token, TOKEN_EQUAL) ) + readToken (token); + + if ( isKeyword (token, KEYWORD_declare) ) + parseDeclare (token, FALSE); + + if (isKeyword (token, KEYWORD_is) || + isKeyword (token, KEYWORD_begin) ) + { + addToScope(token, name->string, kind); + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING) || + !isKeyword (token, KEYWORD_NONE) + ) + makeSqlTag (name, kind); + + parseBlock (token, TRUE); + vStringClear (token->scope); + token->scopeKind = SQLTAG_COUNT; + } + } + vStringCopy(token->scope, saveScope); + token->scopeKind = saveScopeKind; + deleteToken (name); + vStringDelete(saveScope); } static void parseRecord (tokenInfo *const token) { - /* Make it a bit forgiving, this is called from - * multiple functions, parseTable, parseType */ - if (!isType (token, TOKEN_OPEN_PAREN)) - readToken (token); + /* + * Make it a bit forgiving, this is called from + * multiple functions, parseTable, parseType + */ + if (!isType (token, TOKEN_OPEN_PAREN)) + readToken (token); - Assert (isType (token, TOKEN_OPEN_PAREN)); - do - { - if ( isType (token, TOKEN_COMMA) || isType (token, TOKEN_OPEN_PAREN) ) - readToken (token); + Assert (isType (token, TOKEN_OPEN_PAREN)); + do + { + if ( isType (token, TOKEN_COMMA) || isType (token, TOKEN_OPEN_PAREN) ) + readToken (token); - /* - * Create table statements can end with various constraints - * which must be excluded from the SQLTAG_FIELD. - * create table t1 ( - * c1 integer, - * c2 char(30), - * c3 numeric(10,5), - * c4 integer, - * constraint whatever, - * primary key(c1), - * foreign key (), - * check () - * ) - */ - if (! 
(isKeyword(token, KEYWORD_primary) || - isKeyword(token, KEYWORD_references) || - isKeyword(token, KEYWORD_unique) || - isKeyword(token, KEYWORD_check) || - isKeyword(token, KEYWORD_constraint) || - isKeyword(token, KEYWORD_foreign) ) ) - { - if (isType (token, TOKEN_IDENTIFIER) || - isType (token, TOKEN_STRING)) - makeSqlTag (token, SQLTAG_FIELD); - } + /* + * Create table statements can end with various constraints + * which must be excluded from the SQLTAG_FIELD. + * create table t1 ( + * c1 integer, + * c2 char(30), + * c3 numeric(10,5), + * c4 integer, + * constraint whatever, + * primary key(c1), + * foreign key (), + * check () + * ) + */ + if (! (isKeyword(token, KEYWORD_primary) || + isKeyword(token, KEYWORD_references) || + isKeyword(token, KEYWORD_unique) || + isKeyword(token, KEYWORD_check) || + isKeyword(token, KEYWORD_constraint) || + isKeyword(token, KEYWORD_foreign) ) ) + { + if (isType (token, TOKEN_IDENTIFIER) || + isType (token, TOKEN_STRING)) + makeSqlTag (token, SQLTAG_FIELD); + } - while (!(isType (token, TOKEN_COMMA) || - isType (token, TOKEN_CLOSE_PAREN) || - isType (token, TOKEN_OPEN_PAREN) - )) - { - readToken (token); - /* - * A table structure can look like this: - * create table t1 ( - * c1 integer, - * c2 char(30), - * c3 numeric(10,5), - * c4 integer - * ) - * We can't just look for a COMMA or CLOSE_PAREN - * since that will not deal with the numeric(10,5) - * case. So we need to skip the argument list - * when we find an open paren. - */ - if (isType (token, TOKEN_OPEN_PAREN)) - { - /* Reads to the next token after the TOKEN_CLOSE_PAREN */ - skipArgumentList(token); - } - } - } while (! isType (token, TOKEN_CLOSE_PAREN)); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) || + isType (token, TOKEN_OPEN_PAREN) + )) + { + readToken (token); + /* + * A table structure can look like this: + * create table t1 ( + * c1 integer, + * c2 char(30), + * c3 numeric(10,5), + * c4 integer + * ) + * We can't just look for a COMMA or CLOSE_PAREN + * since that will not deal with the numeric(10,5) + * case. So we need to skip the argument list + * when we find an open paren. + */ + if (isType (token, TOKEN_OPEN_PAREN)) + { + /* Reads to the next token after the TOKEN_CLOSE_PAREN */ + skipArgumentList(token); + } + } + } while (! 
isType (token, TOKEN_CLOSE_PAREN)); } static void parseType (tokenInfo *const token) { - tokenInfo *const name = newToken (); - vString * saveScope = vStringNew (); + tokenInfo *const name = newToken (); + vString * saveScope = vStringNew (); + sqlKind saveScopeKind; - vStringCopy(saveScope, token->scope); - /* If a scope has been set, add it to the name */ - addToScope (name, token->scope); - readToken (name); - if (isType (name, TOKEN_IDENTIFIER)) - { - readToken (token); - if (isKeyword (token, KEYWORD_is)) + vStringCopy(saveScope, token->scope); + /* If a scope has been set, add it to the name */ + addToScope (name, token->scope, token->scopeKind); + saveScopeKind = token->scopeKind; + readToken (name); + if (isType (name, TOKEN_IDENTIFIER)) { - readToken (token); - addToScope (token, name->string); - switch (token->keyword) - { - case KEYWORD_record: - case KEYWORD_object: - makeSqlTag (name, SQLTAG_RECORD); - parseRecord (token); - break; + readToken (token); + if (isKeyword (token, KEYWORD_is)) + { + readToken (token); + switch (token->keyword) + { + case KEYWORD_record: + case KEYWORD_object: + makeSqlTag (name, SQLTAG_RECORD); + addToScope (token, name->string, SQLTAG_RECORD); + parseRecord (token); + break; - case KEYWORD_table: - makeSqlTag (name, SQLTAG_TABLE); - break; + case KEYWORD_table: + makeSqlTag (name, SQLTAG_TABLE); + break; - case KEYWORD_ref: - readToken (token); - if (isKeyword (token, KEYWORD_cursor)) - makeSqlTag (name, SQLTAG_CURSOR); - break; + case KEYWORD_ref: + readToken (token); + if (isKeyword (token, KEYWORD_cursor)) + makeSqlTag (name, SQLTAG_CURSOR); + break; - default: break; - } - vStringClear (token->scope); + default: break; + } + vStringClear (token->scope); + token->scopeKind = SQLTAG_COUNT; + } } - } - vStringCopy(token->scope, saveScope); - deleteToken (name); - vStringDelete(saveScope); + vStringCopy(token->scope, saveScope); + token->scopeKind = saveScopeKind; + deleteToken (name); + vStringDelete(saveScope); } static void parseSimple (tokenInfo *const token, const sqlKind kind) { - readToken (token); - if (isType (token, TOKEN_IDENTIFIER) || - isType (token, TOKEN_STRING)) - makeSqlTag (token, kind); + /* This will simply make the tagname from the first word found */ + readToken (token); + if (isType (token, TOKEN_IDENTIFIER) || + isType (token, TOKEN_STRING)) + makeSqlTag (token, kind); } static void parseDeclare (tokenInfo *const token, const boolean local) { - /* - * PL/SQL declares are of this format: - * IS|AS - * [declare] - * CURSOR curname ... - * varname1 datatype; - * varname2 datatype; - * varname3 datatype; - * begin - */ + /* + * PL/SQL declares are of this format: + * IS|AS + * [declare] + * CURSOR curname ... + * varname1 datatype; + * varname2 datatype; + * varname3 datatype; + * begin + */ - if (isKeyword (token, KEYWORD_declare)) - readToken (token); - while (! isKeyword (token, KEYWORD_begin) && ! isKeyword (token, KEYWORD_end)) - { - switch (token->keyword) + if (isKeyword (token, KEYWORD_declare)) + readToken (token); + while (! isKeyword (token, KEYWORD_begin) && ! 
isKeyword (token, KEYWORD_end)) { - case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break; - case KEYWORD_function: parseSubProgram (token); break; - case KEYWORD_procedure: parseSubProgram (token); break; - case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break; - case KEYWORD_trigger: parseSimple (token, SQLTAG_TRIGGER); break; - case KEYWORD_type: parseType (token); break; - - default: - if (isType (token, TOKEN_IDENTIFIER)) + switch (token->keyword) { - if (local) - { - makeSqlTag (token, SQLTAG_LOCAL_VARIABLE); - } else { - makeSqlTag (token, SQLTAG_VARIABLE); - } + case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break; + case KEYWORD_function: parseSubProgram (token); break; + case KEYWORD_procedure: parseSubProgram (token); break; + case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break; + case KEYWORD_trigger: parseSimple (token, SQLTAG_TRIGGER); break; + case KEYWORD_type: parseType (token); break; + + default: + if (isType (token, TOKEN_IDENTIFIER)) + { + if (local) + { + makeSqlTag (token, SQLTAG_LOCAL_VARIABLE); + } + else + { + makeSqlTag (token, SQLTAG_VARIABLE); + } + } + break; } - break; + findToken (token, TOKEN_SEMICOLON); + readToken (token); } - findToken (token, TOKEN_SEMICOLON); - readToken (token); - } } static void parseDeclareANSI (tokenInfo *const token, const boolean local) { - tokenInfo *const type = newToken (); - /* - * ANSI declares are of this format: - * BEGIN - * DECLARE varname1 datatype; - * DECLARE varname2 datatype; - * ... - * - * This differ from PL/SQL where DECLARE preceeds the BEGIN block - * and the DECLARE keyword is not repeated. - */ - while (isKeyword (token, KEYWORD_declare)) - { - readToken (token); - readToken (type); + tokenInfo *const type = newToken (); + /* + * ANSI declares are of this format: + * BEGIN + * DECLARE varname1 datatype; + * DECLARE varname2 datatype; + * ... + * + * This differ from PL/SQL where DECLARE preceeds the BEGIN block + * and the DECLARE keyword is not repeated. 
+ */ + while (isKeyword (token, KEYWORD_declare)) + { + readToken (token); + readToken (type); - if (isKeyword (type, KEYWORD_cursor)) - makeSqlTag (token, SQLTAG_CURSOR); - else if (isKeyword (token, KEYWORD_local) && - isKeyword (type, KEYWORD_temporary)) - { - /* - * DECLARE LOCAL TEMPORARY TABLE table_name ( - * c1 int, - * c2 int - * ); - */ - readToken (token); - if (isKeyword (token, KEYWORD_table)) - { - readToken (token); - if (isType(token, TOKEN_IDENTIFIER) || - isType(token, TOKEN_STRING) ) - { - makeSqlTag (token, SQLTAG_TABLE); - } - } - } - else if (isType (token, TOKEN_IDENTIFIER) || - isType (token, TOKEN_STRING)) - { - if (local) - makeSqlTag (token, SQLTAG_LOCAL_VARIABLE); - else - makeSqlTag (token, SQLTAG_VARIABLE); - } - findToken (token, TOKEN_SEMICOLON); - readToken (token); - } - deleteToken (type); + if (isKeyword (type, KEYWORD_cursor)) + makeSqlTag (token, SQLTAG_CURSOR); + else if (isKeyword (token, KEYWORD_local) && + isKeyword (type, KEYWORD_temporary)) + { + /* + * DECLARE LOCAL TEMPORARY TABLE table_name ( + * c1 int, + * c2 int + * ); + */ + readToken (token); + if (isKeyword (token, KEYWORD_table)) + { + readToken (token); + if (isType(token, TOKEN_IDENTIFIER) || + isType(token, TOKEN_STRING) ) + { + makeSqlTag (token, SQLTAG_TABLE); + } + } + } + else if (isType (token, TOKEN_IDENTIFIER) || + isType (token, TOKEN_STRING)) + { + if (local) + makeSqlTag (token, SQLTAG_LOCAL_VARIABLE); + else + makeSqlTag (token, SQLTAG_VARIABLE); + } + findToken (token, TOKEN_SEMICOLON); + readToken (token); + } + deleteToken (type); } static void parseLabel (tokenInfo *const token) { - /* - * A label has this format: - * <> - * DECLARE - * v_senator VARCHAR2(100) := 'THURMOND, JESSE'; - * BEGIN - * IF total_contributions (v_senator, 'TOBACCO') > 25000 - * THEN - * <> - * DECLARE - * v_senator VARCHAR2(100) := 'WHATEVERIT, TAKES'; - * BEGIN - * ... - */ + /* + * A label has this format: + * <> + * DECLARE + * v_senator VARCHAR2(100) := 'THURMOND, JESSE'; + * BEGIN + * IF total_contributions (v_senator, 'TOBACCO') > 25000 + * THEN + * <> + * DECLARE + * v_senator VARCHAR2(100) := 'WHATEVERIT, TAKES'; + * BEGIN + * ... + */ - Assert (isType (token, TOKEN_BLOCK_LABEL_BEGIN)); - readToken (token); - if (isType (token, TOKEN_IDENTIFIER)) - { - makeSqlTag (token, SQLTAG_BLOCK_LABEL); - readToken (token); /* read end of label */ - } + Assert (isType (token, TOKEN_BLOCK_LABEL_BEGIN)); + readToken (token); + if (isType (token, TOKEN_IDENTIFIER)) + { + makeSqlTag (token, SQLTAG_BLOCK_LABEL); + readToken (token); /* read end of label */ + } } -static void parseStatements (tokenInfo *const token) +static void parseStatements (tokenInfo *const token, const boolean exit_on_endif ) { - do - { - if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) - parseLabel (token); - else - { - switch (token->keyword) - { - /* - * EXCEPTION - * ; - * - * Where an exception handler could be: - * BEGIN - * WHEN OTHERS THEN - * x := x + 3; - * END; - * In this case we need to skip this keyword and - * move on to the next token without reading until - * TOKEN_SEMICOLON; - */ - case KEYWORD_exception: - readToken (token); - continue; + boolean stmtTerm = FALSE; + do + { - /* - * WHEN statements can be used in exception clauses - * and CASE statements. The CASE statement should skip - * these given below we skip over to an END statement. 
- * But for an exception clause, we can have: - * EXCEPTION - * WHEN OTHERS THEN - * BEGIN - * x := x + 3; - * END; - * If we skip to the TOKEN_SEMICOLON, we miss the begin - * of a nested BEGIN END block. So read the next token - * after the THEN and restart the LOOP. - */ - case KEYWORD_when: - while (! isKeyword (token, KEYWORD_then)) - readToken (token); - readToken (token); - continue; + if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) + parseLabel (token); + else + { + switch (token->keyword) + { + case KEYWORD_exception: + /* + * EXCEPTION + * ; + * + * Where an exception handler could be: + * BEGIN + * WHEN OTHERS THEN + * x := x + 3; + * END; + * In this case we need to skip this keyword and + * move on to the next token without reading until + * TOKEN_SEMICOLON; + */ + readToken (token); + continue; - /* - * We do not want to look for a ; since for an empty - * IF block, that would skip over the END. - * IF...THEN - * END IF; - */ - case KEYWORD_if: - while (! isKeyword (token, KEYWORD_then)) - readToken (token); - /*readToken (token);*/ - parseStatements (token); - break; + case KEYWORD_when: + /* + * WHEN statements can be used in exception clauses + * and CASE statements. The CASE statement should skip + * these given below we skip over to an END statement. + * But for an exception clause, we can have: + * EXCEPTION + * WHEN OTHERS THEN + * BEGIN + * x := x + 3; + * END; + * If we skip to the TOKEN_SEMICOLON, we miss the begin + * of a nested BEGIN END block. So read the next token + * after the THEN and restart the LOOP. + */ + while (! isKeyword (token, KEYWORD_then)) + readToken (token); - /* - * LOOP... - * END LOOP; - * - * FOR loop_name AS cursor_name CURSOR FOR ... - * END FOR; - */ - case KEYWORD_loop: - case KEYWORD_case: - case KEYWORD_for: - readToken (token); - parseStatements (token); - break; + readToken (token); + continue; - case KEYWORD_declare: - case KEYWORD_begin: - parseBlock (token, TRUE); - break; + case KEYWORD_if: + /* + * We do not want to look for a ; since for an empty + * IF block, it would skip over the END. + * IF...THEN + * END IF; + * + * IF...THEN + * ELSE + * END IF; + * + * IF...THEN + * ELSEIF...THEN + * ELSE + * END IF; + * + * or non-ANSI + * IF ... + * BEGIN + * END + */ + while ( ! isKeyword (token, KEYWORD_then) && + ! isKeyword (token, KEYWORD_begin) ) + { + readToken (token); + } - default: - readToken (token); - break; - } - findToken (token, TOKEN_SEMICOLON); - } - readToken (token); - } while (! isKeyword (token, KEYWORD_end)); + if( isKeyword (token, KEYWORD_begin ) ) + { + parseBlock(token, FALSE); + + /* + * Handle the non-Ansi IF blocks. + * parseBlock consumes the END, so if the next + * token in a command terminator (like GO) + * we know we are done with this statement. + */ + if ( isCmdTerm (token) ) + stmtTerm = TRUE; + } + else + { + readToken (token); + + while( ! (isKeyword (token, KEYWORD_end ) || + isKeyword (token, KEYWORD_endif ) ) + ) + { + if ( isKeyword (token, KEYWORD_else) || + isKeyword (token, KEYWORD_elseif) ) + readToken (token); + + parseStatements (token, TRUE); + + if ( isCmdTerm(token) ) + readToken (token); + + } + + /* + * parseStatements returns when it finds an END, an IF + * should follow the END for ANSI anyway. 
+ * IF...THEN + * END IF; + */ + if( isKeyword (token, KEYWORD_end ) ) + readToken (token); + + if( isKeyword (token, KEYWORD_if ) || isKeyword (token, KEYWORD_endif ) ) + { + readToken (token); + if ( isCmdTerm(token) ) + stmtTerm = TRUE; + } + else + { + /* + * Well we need to do something here. + * There are lots of different END statements + * END; + * END CASE; + * ENDIF; + * ENDCASE; + */ + } + } + break; + + case KEYWORD_loop: + case KEYWORD_case: + case KEYWORD_for: + /* + * LOOP... + * END LOOP; + * + * CASE + * WHEN '1' THEN + * END CASE; + * + * FOR loop_name AS cursor_name CURSOR FOR ... + * DO + * END FOR; + */ + if( isKeyword (token, KEYWORD_for ) ) + { + /* loop name */ + readToken (token); + /* AS */ + readToken (token); + + while ( ! isKeyword (token, KEYWORD_is) ) + { + /* + * If this is not an AS keyword this is + * not a proper FOR statement and should + * simply be ignored + */ + return; + } + + while ( ! isKeyword (token, KEYWORD_do) ) + readToken (token); + } + + + readToken (token); + while( ! isKeyword (token, KEYWORD_end ) ) + { + /* + if ( isKeyword (token, KEYWORD_else) || + isKeyword (token, KEYWORD_elseif) ) + readToken (token); + */ + + parseStatements (token, FALSE); + + if ( isCmdTerm(token) ) + readToken (token); + } + + + if( isKeyword (token, KEYWORD_end ) ) + readToken (token); + + /* + * Typically ended with + * END LOOP [loop name]; + * END CASE + * END FOR [loop name]; + */ + if ( isKeyword (token, KEYWORD_loop) || + isKeyword (token, KEYWORD_case) || + isKeyword (token, KEYWORD_for) ) + readToken (token); + + if ( isCmdTerm(token) ) + stmtTerm = TRUE; + + break; + + case KEYWORD_create: + readToken (token); + parseKeywords(token); + break; + + case KEYWORD_declare: + case KEYWORD_begin: + parseBlock (token, TRUE); + break; + + case KEYWORD_end: + break; + + default: + readToken (token); + break; + } + /* + * Not all statements must end in a semi-colon + * begin + * if current publisher <> 'publish' then + * signal UE_FailStatement + * end if + * end; + * The last statement prior to an end ("signal" above) does + * not need a semi-colon, nor does the end if, since it is + * also the last statement prior to the end of the block. + * + * So we must read to the first semi-colon or an END block + */ + while ( ! stmtTerm && + ! ( isKeyword (token, KEYWORD_end) || + (isCmdTerm(token)) ) + ) + { + if ( isKeyword (token, KEYWORD_endif) && + exit_on_endif ) + return; + + if (isType (token, TOKEN_COLON) ) + { + /* + * A : can signal a loop name + * myloop: + * LOOP + * LEAVE myloop; + * END LOOP; + * Unfortunately, labels do not have a + * cmd terminator, therefore we have to check + * if the next token is a keyword and process + * it accordingly. 
+ */ + readToken (token); + if ( isKeyword (token, KEYWORD_loop) || + isKeyword (token, KEYWORD_while) || + isKeyword (token, KEYWORD_for) ) + /* parseStatements (token); */ + return; + } + + readToken (token); + + if (isType (token, TOKEN_OPEN_PAREN) || + isType (token, TOKEN_OPEN_CURLY) || + isType (token, TOKEN_OPEN_SQUARE) ) + skipToMatched (token); + + /* + * Since we know how to parse various statements + * if we detect them, parse them to completion + */ + if (isType (token, TOKEN_BLOCK_LABEL_BEGIN) || + isKeyword (token, KEYWORD_exception) || + isKeyword (token, KEYWORD_loop) || + isKeyword (token, KEYWORD_case) || + isKeyword (token, KEYWORD_for) || + isKeyword (token, KEYWORD_begin) ) + parseStatements (token, FALSE); + else if (isKeyword (token, KEYWORD_if)) + parseStatements (token, TRUE); + + } + } + /* + * We assumed earlier all statements ended with a command terminator. + * See comment above, now, only read if the current token + * is not a command terminator. + */ + if ( isCmdTerm(token) && ! stmtTerm ) + stmtTerm = TRUE; + + } while (! isKeyword (token, KEYWORD_end) && + ! (exit_on_endif && isKeyword (token, KEYWORD_endif) ) && + ! stmtTerm ); } static void parseBlock (tokenInfo *const token, const boolean local) { - if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) - { - parseLabel (token); - readToken (token); - } - if (! isKeyword (token, KEYWORD_begin)) - { - readToken (token); - /*dispToken(token, "parseBlock calling parseDeclare");*/ - parseDeclare (token, local); - } - if (isKeyword (token, KEYWORD_begin)) - { - readToken (token); - parseDeclareANSI (token, local); - while (! isKeyword (token, KEYWORD_end)) - parseStatements (token); - findToken (token, TOKEN_SEMICOLON); - } + if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) + { + parseLabel (token); + readToken (token); + } + if (! isKeyword (token, KEYWORD_begin)) + { + readToken (token); + /* + * These are Oracle style declares which generally come + * between an IS/AS and BEGIN block. + */ + parseDeclare (token, local); + } + if (isKeyword (token, KEYWORD_begin)) + { + readToken (token); + /* + * Check for ANSI declarations which always follow + * a BEGIN statement. This routine will not advance + * the token if none are found. + */ + parseDeclareANSI (token, local); + token->begin_end_nest_lvl++; + while (! isKeyword (token, KEYWORD_end)) + { + parseStatements (token, FALSE); + + if ( isCmdTerm(token) ) + readToken (token); + } + token->begin_end_nest_lvl--; + + /* + * Read the next token (we will assume + * it is the command delimiter) + */ + readToken (token); + + /* + * Check if the END block is terminated + */ + if ( !isCmdTerm (token) ) + { + /* + * Not sure what to do here at the moment. + * I think the routine that calls parseBlock + * must expect the next token has already + * been read since it is possible this + * token is not a command delimiter. + */ + /* findCmdTerm (token, FALSE); */ + } + } } static void parsePackage (tokenInfo *const token) { - /* - * Packages can be specified in a number of ways: - * CREATE OR REPLACE PACKAGE pkg_name AS - * or - * CREATE OR REPLACE PACKAGE owner.pkg_name AS - * or by specifying a package body - * CREATE OR REPLACE PACKAGE BODY pkg_name AS - * CREATE OR REPLACE PACKAGE BODY owner.pkg_name AS - */ - tokenInfo *const name = newToken (); - readToken (name); - if (isKeyword (name, KEYWORD_body)) - readToken (name); - /*dispToken(token, "parsePackage after BODY");*/ - /* Chceck for owner.pkg_name */ - while (! 
isKeyword (token, KEYWORD_is)) - { - readToken (token); - if ( isType(token, TOKEN_PERIOD) ) - { - readToken (name); - /*dispToken(name, "parsePackage new name");*/ - } - } - dispToken(name, "parsePackage name"); - if (isKeyword (token, KEYWORD_is)) - { - /*dispToken(token, "parsePackage processing IS");*/ - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) - makeSqlTag (name, SQLTAG_PACKAGE); - parseBlock (token, FALSE); - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); + /* + * Packages can be specified in a number of ways: + * CREATE OR REPLACE PACKAGE pkg_name AS + * or + * CREATE OR REPLACE PACKAGE owner.pkg_name AS + * or by specifying a package body + * CREATE OR REPLACE PACKAGE BODY pkg_name AS + * CREATE OR REPLACE PACKAGE BODY owner.pkg_name AS + */ + tokenInfo *const name = newToken (); + readIdentifier (name); + if (isKeyword (name, KEYWORD_body)) + { + /* + * Ignore the BODY tag since we will process + * the body or prototypes in the same manner + */ + readIdentifier (name); + } + /* Check for owner.pkg_name */ + while (! isKeyword (token, KEYWORD_is)) + { + readToken (token); + if ( isType(token, TOKEN_PERIOD) ) + { + readIdentifier (name); + } + } + if (isKeyword (token, KEYWORD_is)) + { + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING)) + makeSqlTag (name, SQLTAG_PACKAGE); + addToScope (token, name->string, SQLTAG_PACKAGE); + parseBlock (token, FALSE); + vStringClear (token->scope); + token->scopeKind = SQLTAG_COUNT; + } + findCmdTerm (token, FALSE); + deleteToken (name); } static void parseTable (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create table t1 (c1 int); - * create global tempoary table t2 (c1 int); - * create table "t3" (c1 int); - * create table bob.t4 (c1 int); - * create table bob."t5" (c1 int); - * create table "bob"."t6" (c1 int); - * create table bob."t7" (c1 int); - * Proxy tables use this format: - * create existing table bob."t7" AT '...'; */ + /* + * This deals with these formats: + * create table t1 (c1 int); + * create global tempoary table t2 (c1 int); + * create table "t3" (c1 int); + * create table bob.t4 (c1 int); + * create table bob."t5" (c1 int); + * create table "bob"."t6" (c1 int); + * create table bob."t7" (c1 int); + * Proxy tables use this format: + * create existing table bob."t7" AT '...'; + * SQL Server and Sybase formats + * create table OnlyTable ( + * create table dbo.HasOwner ( + * create table [dbo].[HasOwnerSquare] ( + * create table master.dbo.HasDb ( + * create table master..HasDbNoOwner ( + * create table [master].dbo.[HasDbAndOwnerSquare] ( + * create table [master]..[HasDbNoOwnerSquare] ( + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } - if (isType (token, TOKEN_OPEN_PAREN)) - { - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) - { - makeSqlTag (name, SQLTAG_TABLE); - vStringCopy(token->scope, name->string); - parseRecord (token); - vStringClear (token->scope); - } - } else if (isKeyword (token, KEYWORD_at)) - { - if (isType (name, TOKEN_IDENTIFIER)) - { - makeSqlTag (name, SQLTAG_TABLE); - } - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); + /* This could be a database, owner or table name */ + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + /* + * This could be a owner or table name. 
+ * But this is also a special case since the table can be + * referenced with a blank owner: + * dbname..tablename + */ + readIdentifier (name); + /* Check if a blank name was provided */ + if (isType (name, TOKEN_PERIOD)) + { + readIdentifier (name); + } + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + /* This can only be the table name */ + readIdentifier (name); + readToken (token); + } + } + if (isType (token, TOKEN_OPEN_PAREN)) + { + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING)) + { + makeSqlTag (name, SQLTAG_TABLE); + vStringCopy(token->scope, name->string); + token->scopeKind = SQLTAG_TABLE; + parseRecord (token); + vStringClear (token->scope); + token->scopeKind = SQLTAG_COUNT; + } + } + else if (isKeyword (token, KEYWORD_at)) + { + if (isType (name, TOKEN_IDENTIFIER)) + { + makeSqlTag (name, SQLTAG_TABLE); + } + } + findCmdTerm (token, FALSE); + deleteToken (name); } static void parseIndex (tokenInfo *const token) { - tokenInfo *const name = newToken (); - tokenInfo *const owner = newToken (); + tokenInfo *const name = newToken (); + tokenInfo *const owner = newToken (); - /* This deals with these formats - * create index i1 on t1(c1) create index "i2" on t1(c1) - * create virtual unique clustered index "i3" on t1(c1) - * create unique clustered index "i4" on t1(c1) - * create clustered index "i5" on t1(c1) - * create bitmap index "i6" on t1(c1) */ + /* + * This deals with these formats + * create index i1 on t1(c1) create index "i2" on t1(c1) + * create virtual unique clustered index "i3" on t1(c1) + * create unique clustered index "i4" on t1(c1) + * create clustered index "i5" on t1(c1) + * create bitmap index "i6" on t1(c1) + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } - if ( isKeyword (token, KEYWORD_on) && - (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING) ) ) - { - readToken (owner); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (owner); - readToken (token); - } - addToScope(name, owner->string); - makeSqlTag (name, SQLTAG_INDEX); - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); - deleteToken (owner); + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readIdentifier (name); + readToken (token); + } + if ( isKeyword (token, KEYWORD_on) && + (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING) ) ) + { + readIdentifier (owner); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readIdentifier (owner); + readToken (token); + } + addToScope(name, owner->string, SQLTAG_TABLE /* FIXME? */); + makeSqlTag (name, SQLTAG_INDEX); + } + findCmdTerm (token, FALSE); + deleteToken (name); + deleteToken (owner); } static void parseEvent (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create event e1 handler begin end; - * create event "e2" handler begin end; - * create event dba."e3" handler begin end; - * create event "dba"."e4" handler begin end; */ + /* + * This deals with these formats + * create event e1 handler begin end; + * create event "e2" handler begin end; + * create event dba."e3" handler begin end; + * create event "dba"."e4" handler begin end; + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - } - while (! 
(isKeyword (token, KEYWORD_handler) || - (isType (token, TOKEN_SEMICOLON))) ) - { - readToken (token); - } + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readIdentifier (name); + } + while (! (isKeyword (token, KEYWORD_handler) || + (isType (token, TOKEN_SEMICOLON))) ) + { + readToken (token); + } - if ( isKeyword (token, KEYWORD_handler) || - isType (token, TOKEN_SEMICOLON) ) - { - makeSqlTag (name, SQLTAG_EVENT); - } + if ( isKeyword (token, KEYWORD_handler) || + isType (token, TOKEN_SEMICOLON) ) + { + makeSqlTag (name, SQLTAG_EVENT); + } - if (isKeyword (token, KEYWORD_handler)) - { - readToken (token); - if ( isKeyword (token, KEYWORD_begin) ) - { - parseBlock (token, TRUE); - } - findToken (token, TOKEN_SEMICOLON); - } - deleteToken (name); + if (isKeyword (token, KEYWORD_handler)) + { + readToken (token); + if ( isKeyword (token, KEYWORD_begin) ) + { + parseBlock (token, TRUE); + } + findCmdTerm (token, TRUE); + } + deleteToken (name); } static void parseTrigger (tokenInfo *const token) { - tokenInfo *const name = newToken (); - tokenInfo *const table = newToken (); + tokenInfo *const name = newToken (); + tokenInfo *const table = newToken (); - /* This deals with these formats - * create or replace trigger tr1 begin end; - * create trigger "tr2" begin end; - * drop trigger "droptr1"; - * create trigger "tr3" CALL sp_something(); - * create trigger "owner"."tr4" begin end; - * create trigger "tr5" not valid; - * create trigger "tr6" begin end; */ + /* + * This deals with these formats + * create or replace trigger tr1 begin end; + * create trigger "tr2" begin end; + * drop trigger "droptr1"; + * create trigger "tr3" CALL sp_something(); + * create trigger "owner"."tr4" begin end; + * create trigger "tr5" not valid; + * create trigger "tr6" begin end; + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readIdentifier (name); + readToken (token); + } - while (! (isKeyword (token, KEYWORD_on) || - ( isType (token, TOKEN_SEMICOLON))) ) - { - readToken (token); - } + while ( !isKeyword (token, KEYWORD_on) && + !isCmdTerm (token) ) + { + readToken (token); + } - if (! isType (token, TOKEN_SEMICOLON) ) - { - readToken (table); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (table); - readToken (token); - } + /*if (! isType (token, TOKEN_SEMICOLON) ) */ + if (! isCmdTerm (token) ) + { + readToken (table); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readToken (table); + readToken (token); + } - while (! (isKeyword (token, KEYWORD_begin) || - (isKeyword (token, KEYWORD_call)) || - ( isType (token, TOKEN_SEMICOLON))) ) - { - readToken (token); - if ( isKeyword (token, KEYWORD_declare) ) - { - addToScope(token, name->string); - parseDeclare(token, TRUE); - vStringClear(token->scope); - } - } + while (! 
(isKeyword (token, KEYWORD_begin) || + (isKeyword (token, KEYWORD_call)) || + ( isCmdTerm (token))) ) + { + if ( isKeyword (token, KEYWORD_declare) ) + { + addToScope(token, name->string, SQLTAG_TRIGGER); + parseDeclare(token, TRUE); + vStringClear(token->scope); + token->scopeKind = SQLTAG_COUNT; + } + else + readToken (token); + } - if ( isKeyword (token, KEYWORD_begin) || - isKeyword (token, KEYWORD_call) ) - { - addToScope(name, table->string); - makeSqlTag (name, SQLTAG_TRIGGER); - addToScope(token, table->string); - if ( isKeyword (token, KEYWORD_begin) ) - { - parseBlock (token, TRUE); - } - vStringClear(token->scope); - } - } + if ( isKeyword (token, KEYWORD_begin) || + isKeyword (token, KEYWORD_call) ) + { + addToScope(name, table->string, SQLTAG_TABLE); + makeSqlTag (name, SQLTAG_TRIGGER); + addToScope(token, table->string, SQLTAG_TABLE); + if ( isKeyword (token, KEYWORD_begin) ) + { + parseBlock (token, TRUE); + } + vStringClear(token->scope); + token->scopeKind = SQLTAG_COUNT; + } + } - if (! isType (token, TOKEN_SEMICOLON) ) - { - findToken (token, TOKEN_SEMICOLON); - } - deleteToken (name); - deleteToken (table); + findCmdTerm (token, TRUE); + deleteToken (name); + deleteToken (table); } static void parsePublication (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create or replace publication pu1 () - * create publication "pu2" () - * create publication dba."pu3" () - * create publication "dba"."pu4" () */ + /* + * This deals with these formats + * create or replace publication pu1 () + * create publication "pu2" () + * create publication dba."pu3" () + * create publication "dba"."pu4" () + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } - if (isType (token, TOKEN_OPEN_PAREN)) - { - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) { - makeSqlTag (name, SQLTAG_PUBLICATION); + readIdentifier (name); + readToken (token); } - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); + if (isType (token, TOKEN_OPEN_PAREN)) + { + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING)) + { + makeSqlTag (name, SQLTAG_PUBLICATION); + } + } + findCmdTerm (token, FALSE); + deleteToken (name); } static void parseService (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * CREATE SERVICE s1 TYPE 'HTML' - * AUTHORIZATION OFF USER DBA AS - * SELECT * - * FROM SYS.SYSTABLE; - * CREATE SERVICE "s2" TYPE 'HTML' - * AUTHORIZATION OFF USER DBA AS - * CALL sp_Something(); */ + /* + * This deals with these formats + * CREATE SERVICE s1 TYPE 'HTML' + * AUTHORIZATION OFF USER DBA AS + * SELECT * + * FROM SYS.SYSTABLE; + * CREATE SERVICE "s2" TYPE 'HTML' + * AUTHORIZATION OFF USER DBA AS + * CALL sp_Something(); + */ - readToken (name); - readToken (token); - if (isKeyword (token, KEYWORD_type)) - { - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) - { - makeSqlTag (name, SQLTAG_SERVICE); - } - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); + readIdentifier (name); + readToken (token); + if (isKeyword (token, KEYWORD_type)) + { + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING)) + { + makeSqlTag (name, SQLTAG_SERVICE); + } + } + findCmdTerm (token, FALSE); + 
deleteToken (name); } static void parseDomain (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * CREATE DOMAIN|DATATYPE [AS] your_name ...; */ + /* + * This deals with these formats + * CREATE DOMAIN|DATATYPE [AS] your_name ...; + */ - readToken (name); - if (isKeyword (name, KEYWORD_is)) - { - readToken (name); - } - readToken (token); - if (isType (name, TOKEN_IDENTIFIER) || - isType (name, TOKEN_STRING)) - { - makeSqlTag (name, SQLTAG_DOMAIN); - } - findToken (token, TOKEN_SEMICOLON); - deleteToken (name); + readIdentifier (name); + if (isKeyword (name, KEYWORD_is)) + { + readIdentifier (name); + } + readToken (token); + if (isType (name, TOKEN_IDENTIFIER) || + isType (name, TOKEN_STRING)) + { + makeSqlTag (name, SQLTAG_DOMAIN); + } + findCmdTerm (token, FALSE); + deleteToken (name); } static void parseDrop (tokenInfo *const token) { - /* This deals with these formats - * DROP TABLE|PROCEDURE|DOMAIN|DATATYPE name; - * - * Just simply skip over these statements. - * They are often confused with PROCEDURE prototypes - * since the syntax is similar, this effectively deals with - * the issue for all types. */ + /* + * This deals with these formats + * DROP TABLE|PROCEDURE|DOMAIN|DATATYPE name; + * + * Just simply skip over these statements. + * They are often confused with PROCEDURE prototypes + * since the syntax is similar, this effectively deals with + * the issue for all types. + */ - /*dispToken(token, "parseDrop");*/ - findToken (token, TOKEN_SEMICOLON); + findCmdTerm (token, FALSE); } static void parseVariable (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create variable varname1 integer; - * create variable @varname2 integer; - * create variable "varname3" integer; - * drop variable @varname3; */ + /* + * This deals with these formats + * create variable varname1 integer; + * create variable @varname2 integer; + * create variable "varname3" integer; + * drop variable @varname3; + */ - readToken (name); - readToken (token); - if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) - && !isType (token, TOKEN_SEMICOLON) ) - { - makeSqlTag (name, SQLTAG_VARIABLE); - } - if (! isType (token, TOKEN_SEMICOLON) ) - findToken (token, TOKEN_SEMICOLON); + readIdentifier (name); + readToken (token); + if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) + && !isType (token, TOKEN_SEMICOLON) ) + { + makeSqlTag (name, SQLTAG_VARIABLE); + } + findCmdTerm (token, TRUE); - deleteToken (name); + deleteToken (name); } static void parseSynonym (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create variable varname1 integer; - * create variable @varname2 integer; - * create variable "varname3" integer; - * drop variable @varname3; */ + /* + * This deals with these formats + * create variable varname1 integer; + * create variable @varname2 integer; + * create variable "varname3" integer; + * drop variable @varname3; + */ - readToken (name); - readToken (token); - if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) - && isKeyword (token, KEYWORD_for) ) - { - makeSqlTag (name, SQLTAG_SYNONYM); - } - if (! 
isType (token, TOKEN_SEMICOLON) ) - findToken (token, TOKEN_SEMICOLON); + readIdentifier (name); + readToken (token); + if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) + && isKeyword (token, KEYWORD_for) ) + { + makeSqlTag (name, SQLTAG_SYNONYM); + } + findCmdTerm (token, TRUE); - deleteToken (name); + deleteToken (name); } static void parseView (tokenInfo *const token) { - tokenInfo *const name = newToken (); + tokenInfo *const name = newToken (); - /* This deals with these formats - * create variable varname1 integer; - * create variable @varname2 integer; - * create variable "varname3" integer; - * drop variable @varname3; */ + /* + * This deals with these formats + * create variable varname1 integer; + * create variable @varname2 integer; + * create variable "varname3" integer; + * drop variable @varname3; + */ - readToken (name); - readToken (token); - if (isType (token, TOKEN_PERIOD)) - { - readToken (name); - readToken (token); - } - if ( isType (token, TOKEN_OPEN_PAREN) ) - { - skipArgumentList(token); + readIdentifier (name); + readToken (token); + if (isType (token, TOKEN_PERIOD)) + { + readIdentifier (name); + readToken (token); + } + if ( isType (token, TOKEN_OPEN_PAREN) ) + { + skipArgumentList(token); - } + } - while (!(isKeyword (token, KEYWORD_is) || - isType (token, TOKEN_SEMICOLON) - )) - { - readToken (token); - } + while (!(isKeyword (token, KEYWORD_is) || + isType (token, TOKEN_SEMICOLON) + )) + { + readToken (token); + } - if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) - && isKeyword (token, KEYWORD_is) ) - { - makeSqlTag (name, SQLTAG_VIEW); - } + if ( (isType (name, TOKEN_IDENTIFIER) || isType (name, TOKEN_STRING)) + && isKeyword (token, KEYWORD_is) ) + { + makeSqlTag (name, SQLTAG_VIEW); + } - if (! 
isType (token, TOKEN_SEMICOLON) ) - findToken (token, TOKEN_SEMICOLON); + findCmdTerm (token, TRUE); - deleteToken (name); + deleteToken (name); } static void parseMLTable (tokenInfo *const token) { - tokenInfo *const version = newToken (); - tokenInfo *const table = newToken (); - tokenInfo *const event = newToken (); + tokenInfo *const version = newToken (); + tokenInfo *const table = newToken (); + tokenInfo *const event = newToken (); - /* This deals with these formats - * call ml_add_table_script( 'version', 'table_name', 'event', - * 'some SQL statement' - * ); */ + /* + * This deals with these formats + * call dbo.ml_add_table_script( 'version', 'table_name', 'event', + * 'some SQL statement' + * ); + */ - readToken (token); - if ( isType (token, TOKEN_OPEN_PAREN) ) - { - readToken (version); - readToken (token); - while (!(isType (token, TOKEN_COMMA) || - isType (token, TOKEN_CLOSE_PAREN) - )) - { - readToken (token); - } + readToken (token); + if ( isType (token, TOKEN_OPEN_PAREN) ) + { + readToken (version); + readToken (token); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) + )) + { + readToken (token); + } - if (isType (token, TOKEN_COMMA)) - { - readToken (table); - readToken (token); - while (!(isType (token, TOKEN_COMMA) || - isType (token, TOKEN_CLOSE_PAREN) - )) - { - readToken (token); - } + if (isType (token, TOKEN_COMMA)) + { + readToken (table); + readToken (token); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) + )) + { + readToken (token); + } - if (isType (token, TOKEN_COMMA)) - { - readToken (event); + if (isType (token, TOKEN_COMMA)) + { + readToken (event); - if (isType (version, TOKEN_STRING) && - isType (table, TOKEN_STRING) && - isType (event, TOKEN_STRING) ) - { - addToScope(version, table->string); - addToScope(version, event->string); - makeSqlTag (version, SQLTAG_MLTABLE); - } - } - if( !isType (token, TOKEN_CLOSE_PAREN) ) - findToken (token, TOKEN_CLOSE_PAREN); - } - } + if (isType (version, TOKEN_STRING) && + isType (table, TOKEN_STRING) && + isType (event, TOKEN_STRING) ) + { + addToScope(version, table->string, SQLTAG_TABLE); + addToScope(version, event->string, SQLTAG_EVENT); + makeSqlTag (version, SQLTAG_MLTABLE); + } + } + if( !isType (token, TOKEN_CLOSE_PAREN) ) + findToken (token, TOKEN_CLOSE_PAREN); + } + } - if (! 
isType (token, TOKEN_SEMICOLON) ) - findToken (token, TOKEN_SEMICOLON); + findCmdTerm (token, TRUE); - deleteToken (version); - deleteToken (table); - deleteToken (event); + deleteToken (version); + deleteToken (table); + deleteToken (event); } static void parseMLConn (tokenInfo *const token) { - tokenInfo *const version = newToken (); - tokenInfo *const event = newToken (); + tokenInfo *const version = newToken (); + tokenInfo *const event = newToken (); - /* This deals with these formats - * call ml_add_connection_script( 'version', 'event', - * 'some SQL statement' - * ); */ + /* + * This deals with these formats + * call ml_add_connection_script( 'version', 'event', + * 'some SQL statement' + * ); + */ - readToken (token); - if ( isType (token, TOKEN_OPEN_PAREN) ) - { - readToken (version); - readToken (token); - while (!(isType (token, TOKEN_COMMA) || - isType (token, TOKEN_CLOSE_PAREN) - )) - { - readToken (token); - } + readToken (token); + if ( isType (token, TOKEN_OPEN_PAREN) ) + { + readToken (version); + readToken (token); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) + )) + { + readToken (token); + } - if (isType (token, TOKEN_COMMA)) - { - readToken (event); + if (isType (token, TOKEN_COMMA)) + { + readToken (event); - if (isType (version, TOKEN_STRING) && - isType (event, TOKEN_STRING) ) - { - addToScope(version, event->string); - makeSqlTag (version, SQLTAG_MLCONN); - } - } - if( !isType (token, TOKEN_CLOSE_PAREN) ) - findToken (token, TOKEN_CLOSE_PAREN); + if (isType (version, TOKEN_STRING) && + isType (event, TOKEN_STRING) ) + { + addToScope(version, event->string, SQLTAG_EVENT); + makeSqlTag (version, SQLTAG_MLCONN); + } + } + if( !isType (token, TOKEN_CLOSE_PAREN) ) + findToken (token, TOKEN_CLOSE_PAREN); - } + } - if (! 
isType (token, TOKEN_SEMICOLON) ) - findToken (token, TOKEN_SEMICOLON); + findCmdTerm (token, TRUE); - deleteToken (version); - deleteToken (event); + deleteToken (version); + deleteToken (event); +} + +static void parseMLProp (tokenInfo *const token) +{ + tokenInfo *const component = newToken (); + tokenInfo *const prop_set_name = newToken (); + tokenInfo *const prop_name = newToken (); + + /* + * This deals with these formats + * ml_add_property ( + * 'comp_name', + * 'prop_set_name', + * 'prop_name', + * 'prop_value' + * ) + */ + + readToken (token); + if ( isType (token, TOKEN_OPEN_PAREN) ) + { + readToken (component); + readToken (token); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) + )) + { + readToken (token); + } + + if (isType (token, TOKEN_COMMA)) + { + readToken (prop_set_name); + readToken (token); + while (!(isType (token, TOKEN_COMMA) || + isType (token, TOKEN_CLOSE_PAREN) + )) + { + readToken (token); + } + + if (isType (token, TOKEN_COMMA)) + { + readToken (prop_name); + + if (isType (component, TOKEN_STRING) && + isType (prop_set_name, TOKEN_STRING) && + isType (prop_name, TOKEN_STRING) ) + { + addToScope(component, prop_set_name->string, SQLTAG_MLPROP /* FIXME */); + addToScope(component, prop_name->string, SQLTAG_MLPROP /* FIXME */); + makeSqlTag (component, SQLTAG_MLPROP); + } + } + if( !isType (token, TOKEN_CLOSE_PAREN) ) + findToken (token, TOKEN_CLOSE_PAREN); + } + } + + findCmdTerm (token, TRUE); + + deleteToken (component); + deleteToken (prop_set_name); + deleteToken (prop_name); +} + +static void parseComment (tokenInfo *const token) +{ + /* + * This deals with this statement: + * COMMENT TO PRESERVE FORMAT ON PROCEDURE "DBA"."test" IS + * {create PROCEDURE DBA."test"() + * BEGIN + * signal dave; + * END + * } + * ; + * The comment can contain anything between the CURLY + * braces + * COMMENT ON USER "admin" IS + * 'Administration Group' + * ; + * Or it could be a simple string with no curly braces + */ + while (! 
isKeyword (token, KEYWORD_is)) + { + readToken (token); + } + readToken (token); + if ( isType(token, TOKEN_OPEN_CURLY) ) + { + findToken (token, TOKEN_CLOSE_CURLY); + } + + findCmdTerm (token, TRUE); } +static void parseKeywords (tokenInfo *const token) +{ + switch (token->keyword) + { + case KEYWORD_begin: parseBlock (token, FALSE); break; + case KEYWORD_comment: parseComment (token); break; + case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break; + case KEYWORD_datatype: parseDomain (token); break; + case KEYWORD_declare: parseBlock (token, FALSE); break; + case KEYWORD_domain: parseDomain (token); break; + case KEYWORD_drop: parseDrop (token); break; + case KEYWORD_event: parseEvent (token); break; + case KEYWORD_function: parseSubProgram (token); break; + case KEYWORD_if: parseStatements (token, FALSE); break; + case KEYWORD_index: parseIndex (token); break; + case KEYWORD_ml_table: parseMLTable (token); break; + case KEYWORD_ml_table_lang: parseMLTable (token); break; + case KEYWORD_ml_table_dnet: parseMLTable (token); break; + case KEYWORD_ml_table_java: parseMLTable (token); break; + case KEYWORD_ml_table_chk: parseMLTable (token); break; + case KEYWORD_ml_conn: parseMLConn (token); break; + case KEYWORD_ml_conn_lang: parseMLConn (token); break; + case KEYWORD_ml_conn_dnet: parseMLConn (token); break; + case KEYWORD_ml_conn_java: parseMLConn (token); break; + case KEYWORD_ml_conn_chk: parseMLConn (token); break; + case KEYWORD_ml_prop: parseMLProp (token); break; + case KEYWORD_package: parsePackage (token); break; + case KEYWORD_procedure: parseSubProgram (token); break; + case KEYWORD_publication: parsePublication (token); break; + case KEYWORD_service: parseService (token); break; + case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break; + case KEYWORD_synonym: parseSynonym (token); break; + case KEYWORD_table: parseTable (token); break; + case KEYWORD_trigger: parseTrigger (token); break; + case KEYWORD_type: parseType (token); break; + case KEYWORD_variable: parseVariable (token); break; + case KEYWORD_view: parseView (token); break; + default: break; + } +} + static void parseSqlFile (tokenInfo *const token) { - do - { - readToken (token); - /*dispToken(token, "psf");*/ - - if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) - parseLabel (token); - else switch (token->keyword) + do { - case KEYWORD_begin: parseBlock (token, FALSE); break; - case KEYWORD_cursor: parseSimple (token, SQLTAG_CURSOR); break; - case KEYWORD_datatype: parseDomain (token); break; - case KEYWORD_declare: parseBlock (token, FALSE); break; - case KEYWORD_domain: parseDomain (token); break; - case KEYWORD_drop: parseDrop (token); break; - case KEYWORD_event: parseEvent (token); break; - case KEYWORD_function: parseSubProgram (token); break; - case KEYWORD_index: parseIndex (token); break; - case KEYWORD_ml_table: parseMLTable (token); break; - case KEYWORD_ml_conn: parseMLConn (token); break; - case KEYWORD_package: parsePackage (token); break; - case KEYWORD_procedure: parseSubProgram (token); break; - case KEYWORD_publication: parsePublication (token); break; - case KEYWORD_service: parseService (token); break; - case KEYWORD_subtype: parseSimple (token, SQLTAG_SUBTYPE); break; - case KEYWORD_synonym: parseSynonym (token); break; - case KEYWORD_table: parseTable (token); break; - case KEYWORD_trigger: parseTrigger (token); break; - case KEYWORD_type: parseType (token); break; - case KEYWORD_variable: parseVariable (token); break; - case KEYWORD_view: parseView (token); break; - default: 
break; - } - } while (! isKeyword (token, KEYWORD_end)); + readToken (token); + + if (isType (token, TOKEN_BLOCK_LABEL_BEGIN)) + parseLabel (token); + else + parseKeywords (token); + } while (! isKeyword (token, KEYWORD_end)); } static void initialize (const langType language) { - Assert (sizeof (SqlKinds) / sizeof (SqlKinds [0]) == SQLTAG_COUNT); - Lang_sql = language; - buildSqlKeywordHash (); + Assert (sizeof (SqlKinds) / sizeof (SqlKinds [0]) == SQLTAG_COUNT); + Lang_sql = language; + buildSqlKeywordHash (); } static void findSqlTags (void) { - tokenInfo *const token = newToken (); - exception_t exception = (exception_t) (setjmp (Exception)); - while (exception == ExceptionNone) - parseSqlFile (token); - deleteToken (token); + tokenInfo *const token = newToken (); + exception_t exception = (exception_t) (setjmp (Exception)); + + while (exception == ExceptionNone) + parseSqlFile (token); + + deleteToken (token); } extern parserDefinition* SqlParser (void) { - static const char *const extensions [] = { "sql", NULL }; - parserDefinition* def = parserNew ("SQL"); - def->kinds = SqlKinds; - def->kindCount = KIND_COUNT (SqlKinds); - def->extensions = extensions; - def->parser = findSqlTags; - def->initialize = initialize; - return def; + static const char *const extensions [] = { "sql", NULL }; + parserDefinition* def = parserNew ("SQL"); + def->kinds = SqlKinds; + def->kindCount = KIND_COUNT (SqlKinds); + def->extensions = extensions; + def->parser = findSqlTags; + def->initialize = initialize; + return def; } -/* vi:set tabstop=8 shiftwidth=4: */ +/* vi:set tabstop=4 shiftwidth=4 noexpandtab: */ diff --git a/tests/ctags/3184782.sql.tags b/tests/ctags/3184782.sql.tags index 4517c7aa..3eacafde 100644 --- a/tests/ctags/3184782.sql.tags +++ b/tests/ctags/3184782.sql.tags @@ -1,7 +1,6 @@ # format=tagmanager -do_this_stuffÌ256Ö0 -elsifÌ16384Ö0 -myfn1Ì256Ö0 -myfn2Ì256Ö0 +do_this_stuffÌ256Îp_testÖ0 +myfn1Ì256Îp_testÖ0 +myfn2Ì256Îp_testÖ0 p_testÌ512Ö0 -process_thisÌ256Ö0 +process_thisÌ256Îp_testÖ0 diff --git a/tests/ctags/bug1570779.sql.tags b/tests/ctags/bug1570779.sql.tags index 76479f77..adc4193c 100644 --- a/tests/ctags/bug1570779.sql.tags +++ b/tests/ctags/bug1570779.sql.tags @@ -1,5 +1,5 @@ # format=tagmanager +addressÌ8ÎemployeesÖ0 employeesÌ1Ö0 -employees.addressÌ8Ö0 -employees.idÌ8Ö0 -employees.nameÌ8Ö0 +idÌ8ÎemployeesÖ0 +nameÌ8ÎemployeesÖ0 diff --git a/tests/ctags/bug1938565.sql.tags b/tests/ctags/bug1938565.sql.tags index d7a4f664..ba596240 100644 --- a/tests/ctags/bug1938565.sql.tags +++ b/tests/ctags/bug1938565.sql.tags @@ -1,4 +1,4 @@ # format=tagmanager demo_pkgÌ512Ö0 -func1Ì16Ö0 -func2Ì16Ö0 +func1Ì16Îdemo_pkgÖ0 +func2Ì16Îdemo_pkgÖ0 diff --git a/tests/ctags/bug1944150.sql.tags b/tests/ctags/bug1944150.sql.tags index 7f02fb00..ea7fdfbc 100644 --- a/tests/ctags/bug1944150.sql.tags +++ b/tests/ctags/bug1944150.sql.tags @@ -1,2 +1,2 @@ # format=tagmanager -cash_trade_comment.Ì65536Ö0 +tr_d_cash_trade_commentÌ65536Îcash_trade_commentÖ0 diff --git a/tests/ctags/bug823000.sql.tags b/tests/ctags/bug823000.sql.tags index 12e186e6..bb1ce94b 100644 --- a/tests/ctags/bug823000.sql.tags +++ b/tests/ctags/bug823000.sql.tags @@ -1,4 +1,4 @@ # format=tagmanager TESTÌ512Ö0 -TestFunc1Ì256Ö0 -TestFunc2Ì256Ö0 +TestFunc1Ì256ÎTESTÖ0 +TestFunc2Ì256ÎTESTÖ0 diff --git a/tests/ctags/db-trig.sql.tags b/tests/ctags/db-trig.sql.tags index a93aef28..892c6627 100644 --- a/tests/ctags/db-trig.sql.tags +++ b/tests/ctags/db-trig.sql.tags @@ -1,2 +1,3 @@ # format=tagmanager -database.startup_dbÌ65536Ö0 
+restrict_loginÌ65536ÎdatabaseÖ0 +startup_dbÌ65536ÎdatabaseÖ0 diff --git a/tests/ctags/ingres_procedures.sql.tags b/tests/ctags/ingres_procedures.sql.tags index 8e60837a..8f790d0d 100644 --- a/tests/ctags/ingres_procedures.sql.tags +++ b/tests/ctags/ingres_procedures.sql.tags @@ -1,2 +1,7 @@ # format=tagmanager db0001Ì256Ö0 +db0002Ì256Ö0 +db0003Ì256Ö0 +errÌ16384Ö0 +nÌ16384Ö0 +xÌ16384Ö0 diff --git a/tests/ctags/random.sql.tags b/tests/ctags/random.sql.tags index 335de61e..f3bff2f0 100644 --- a/tests/ctags/random.sql.tags +++ b/tests/ctags/random.sql.tags @@ -1,12 +1,12 @@ # format=tagmanager -SeedÌ16384Ö0 -get_randÌ256Ö0 -get_rand_maxÌ256Ö0 -incrementÌ16384Ö0 -multiplierÌ16384Ö0 -randÌ16Ö0 -rand_maxÌ16Ö0 -rand_stringÌ16Ö0 +SeedÌ16384ÎrandomÖ0 +get_randÌ256ÎrandomÖ0 +get_rand_maxÌ256ÎrandomÖ0 +incrementÌ16384ÎrandomÖ0 +multiplierÌ16384ÎrandomÖ0 +randÌ16ÎrandomÖ0 +rand_maxÌ16ÎrandomÖ0 +rand_stringÌ16ÎrandomÖ0 randomÌ512Ö0 -smallerÌ16Ö0 -srandÌ256Ö0 +smallerÌ16ÎrandomÖ0 +srandÌ256ÎrandomÖ0 diff --git a/tests/ctags/readlob.sql.tags b/tests/ctags/readlob.sql.tags index cf1e0cd1..8fea5416 100644 --- a/tests/ctags/readlob.sql.tags +++ b/tests/ctags/readlob.sql.tags @@ -1,10 +1,10 @@ # format=tagmanager +b_fileÌ8Îlob_tableÖ0 +b_lobÌ8Îlob_tableÖ0 +c_lobÌ8Îlob_tableÖ0 charbufÌ16384Ö0 clob_locatorÌ16384Ö0 +idÌ8Îlob_tableÖ0 lob_tableÌ1Ö0 -lob_table.b_fileÌ8Ö0 -lob_table.b_lobÌ8Ö0 -lob_table.c_lobÌ8Ö0 -lob_table.idÌ8Ö0 read_amountÌ16384Ö0 read_offsetÌ16384Ö0 diff --git a/tests/ctags/readlong.sql.tags b/tests/ctags/readlong.sql.tags index 93071f1c..0d741767 100644 --- a/tests/ctags/readlong.sql.tags +++ b/tests/ctags/readlong.sql.tags @@ -3,7 +3,7 @@ cur1 long_lenÌ16384Ö0 long_pieceÌ16384Ö0 long_tabÌ16384Ö0 +longcolÌ8ÎlongtableÖ0 longtableÌ1Ö0 -longtable.longcolÌ8Ö0 piece_lenÌ16384Ö0 rcÌ16384Ö0 diff --git a/tests/ctags/refcurs.sql.tags b/tests/ctags/refcurs.sql.tags index 7919d562..400ca282 100644 --- a/tests/ctags/refcurs.sql.tags +++ b/tests/ctags/refcurs.sql.tags @@ -1,6 +1,6 @@ # format=tagmanager -get_cursor_refÌ16Ö0 -mainÌ256Ö0 -process_cursorÌ256Ö0 +get_cursor_refÌ16Îtest_ref_cursorÖ0 +mainÌ256Îtest_ref_cursorÖ0 +process_cursorÌ256Îtest_ref_cursorÖ0 test_ref_cursorÌ512Ö0 typesÌ512Ö0
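
Illustrative only, not part of the patch: a minimal SQL Anywhere-style sketch (all identifiers are hypothetical) of the constructs the new parser comments describe, namely a statement that omits its semicolon before END IF / END, a labelled LOOP, and an ml_add_property call.

    create procedure dbo.p_demo()
    begin
        declare i integer;
        set i = 1;
        myloop:
        loop
            set i = i + 1;
            if i > 3 then
                leave myloop
            end if
        end loop;
    end;

    call ml_add_property( 'SIS', 'demo_set', 'demo_prop', 'demo_value' );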