tokenize method

List<Token> tokenize(
  String pattern
)
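
A minimal usage sketch, assuming a ParseTreePatternMatcher built from a
lexer/parser pair and a grammar that defines a token ID and a rule expr
(the matcher, lexer, and parser names here are illustrative):

// '<ID>' and '<expr>' are island tags; '=' and ';' are sea text that
// the grammar's own lexer turns into real tokens.
final matcher = ParseTreePatternMatcher(lexer, parser);
final tokens = matcher.tokenize('<ID> = <expr>;');
// tokens now holds [TokenTagToken(ID), '=', RuleTagToken(expr), ';']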

Implementation

List<Token> tokenize(String pattern) {
  // split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
  final chunks = split(pattern);

  // create token stream from text and tags
  final tokens = <Token>[];
  for (var chunk in chunks) {
    if (chunk is TagChunk) {
      final tagChunk = chunk;
      // add special rule token or conjure up new token from name
      if (isUpperCase(tagChunk.tag[0])) {
        final ttype = parser.getTokenType(tagChunk.tag);
        if (ttype == Token.INVALID_TYPE) {
          throw ArgumentError(
              'Unknown token ${tagChunk.tag} in pattern: $pattern');
        }
        final t = TokenTagToken(tagChunk.tag, ttype, tagChunk.label);
        tokens.add(t);
      } else if (isLowerCase(tagChunk.tag[0])) {
        final ruleIndex = parser.getRuleIndex(tagChunk.tag);
        if (ruleIndex == -1) {
          throw ArgumentError(
              'Unknown rule ${tagChunk.tag} in pattern: $pattern');
        }
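        // rule tags are matched via the imaginary token type that the
        // rule's bypass alternative registers in the ATN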
        final ruleImaginaryTokenType =
            parser.ATNWithBypassAlts.ruleToTokenType[ruleIndex];
        tokens.add(RuleTagToken(
            tagChunk.tag, ruleImaginaryTokenType, tagChunk.label));
      } else {
        throw ArgumentError(
            'invalid tag: ${tagChunk.tag} in pattern: $pattern');
      }
    } else {
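      // lex the raw text (the "sea" between tags) with the grammar's lexer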
      final textChunk = chunk as TextChunk;
      final inputStream = InputStream.fromString(textChunk.text);
      lexer.inputStream = inputStream;
      var t = lexer.nextToken();
      while (t.type != Token.EOF) {
        tokens.add(t);
        t = lexer.nextToken();
      }
    }
  }

  return tokens;
}
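
Tags may also carry labels; tokenize() forwards tagChunk.label into the
TokenTagToken and RuleTagToken it creates, so a later match can bind the
corresponding subtrees. A short hedged sketch (the <label:tag> pattern
syntax and names are assumptions, not taken from this section):

final labeled = matcher.tokenize('<lhs:ID> = <rhs:expr>;');
// 'lhs' and 'rhs' travel inside the tag tokens as labels.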