path: root/tool
author     yui-knk <spiketeika@gmail.com>           2023-11-18 17:46:38 +0900
committer  Yuichiro Kaneko <spiketeika@gmail.com>   2023-11-18 19:38:55 +0900
commit     c56dd94db0001b900a2dab3ee350a182d6bb42af (patch)
tree       7a4f5236452ae721f67fe6602f3dcbc68a085ac1 /tool
parent     f479e629ab497f325091096819fa5bf60c0d03b2 (diff)
download   ruby-c56dd94db0001b900a2dab3ee350a182d6bb42af.tar.gz
Lrama v0.5.10
Diffstat (limited to 'tool')
-rw-r--r--  tool/lrama/lib/lrama/context.rb  4
-rw-r--r--  tool/lrama/lib/lrama/counterexamples.rb  3
-rw-r--r--  tool/lrama/lib/lrama/counterexamples/path.rb  46
-rw-r--r--  tool/lrama/lib/lrama/counterexamples/production_path.rb  17
-rw-r--r--  tool/lrama/lib/lrama/counterexamples/start_path.rb  21
-rw-r--r--  tool/lrama/lib/lrama/counterexamples/transition_path.rb  17
-rw-r--r--  tool/lrama/lib/lrama/grammar.rb  433
-rw-r--r--  tool/lrama/lib/lrama/grammar/code.rb  102
-rw-r--r--  tool/lrama/lib/lrama/grammar/code/initial_action_code.rb  28
-rw-r--r--  tool/lrama/lib/lrama/grammar/code/no_reference_code.rb  24
-rw-r--r--  tool/lrama/lib/lrama/grammar/code/printer_code.rb  34
-rw-r--r--  tool/lrama/lib/lrama/grammar/code/rule_action.rb  62
-rw-r--r--  tool/lrama/lib/lrama/grammar/counter.rb  15
-rw-r--r--  tool/lrama/lib/lrama/grammar/error_token.rb  6
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder.rb  43
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/base.rb  28
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/list.rb  20
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb  20
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/option.rb  20
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb  28
-rw-r--r--  tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb  27
-rw-r--r--  tool/lrama/lib/lrama/grammar/printer.rb  6
-rw-r--r--  tool/lrama/lib/lrama/grammar/reference.rb  23
-rw-r--r--  tool/lrama/lib/lrama/grammar/rule.rb  20
-rw-r--r--  tool/lrama/lib/lrama/grammar/rule_builder.rb  179
-rw-r--r--  tool/lrama/lib/lrama/lexer.rb  60
-rw-r--r--  tool/lrama/lib/lrama/lexer/location.rb  22
-rw-r--r--  tool/lrama/lib/lrama/lexer/token.rb  26
-rw-r--r--  tool/lrama/lib/lrama/lexer/token/parameterizing.rb  21
-rw-r--r--  tool/lrama/lib/lrama/lexer/token/tag.rb  4
-rw-r--r--  tool/lrama/lib/lrama/lexer/token/user_code.rb  58
-rw-r--r--  tool/lrama/lib/lrama/options.rb  3
-rw-r--r--  tool/lrama/lib/lrama/output.rb  4
-rw-r--r--  tool/lrama/lib/lrama/parser.rb  763
-rw-r--r--  tool/lrama/lib/lrama/report/profile.rb  13
-rw-r--r--  tool/lrama/lib/lrama/version.rb  2
-rw-r--r--  tool/lrama/template/bison/yacc.c  5
37 files changed, 1317 insertions, 890 deletions
diff --git a/tool/lrama/lib/lrama/context.rb b/tool/lrama/lib/lrama/context.rb
index 3d05c1f36e..895290a2bb 100644
--- a/tool/lrama/lib/lrama/context.rb
+++ b/tool/lrama/lib/lrama/context.rb
@@ -309,10 +309,8 @@ module Lrama
# Index is sequence number of nterm, value is state id
# of a default nterm transition destination.
@yydefgoto = Array.new(@states.nterms.count, 0)
- h = {}
# Mapping from nterm to next_states
nterm_to_next_states = {}
- terms_count = @states.terms.count
@states.states.each do |state|
state.nterm_transitions.each do |shift, next_state|
@@ -369,7 +367,7 @@ module Lrama
end
j = @sorted_actions.count - 1
- state_id, froms_and_tos, count, width = action
+ _state_id, _froms_and_tos, count, width = action
while (j >= 0) do
case
diff --git a/tool/lrama/lib/lrama/counterexamples.rb b/tool/lrama/lib/lrama/counterexamples.rb
index 5019257dc3..046265da59 100644
--- a/tool/lrama/lib/lrama/counterexamples.rb
+++ b/tool/lrama/lib/lrama/counterexamples.rb
@@ -3,7 +3,10 @@ require "set"
require "lrama/counterexamples/derivation"
require "lrama/counterexamples/example"
require "lrama/counterexamples/path"
+require "lrama/counterexamples/production_path"
+require "lrama/counterexamples/start_path"
require "lrama/counterexamples/state_item"
+require "lrama/counterexamples/transition_path"
require "lrama/counterexamples/triple"
module Lrama
diff --git a/tool/lrama/lib/lrama/counterexamples/path.rb b/tool/lrama/lib/lrama/counterexamples/path.rb
index a4caecd765..edba67a3b6 100644
--- a/tool/lrama/lib/lrama/counterexamples/path.rb
+++ b/tool/lrama/lib/lrama/counterexamples/path.rb
@@ -19,51 +19,5 @@ module Lrama
end
alias :inspect :to_s
end
-
- class StartPath < Path
- def initialize(to_state_item)
- super nil, to_state_item
- end
-
- def type
- :start
- end
-
- def transition?
- false
- end
-
- def production?
- false
- end
- end
-
- class TransitionPath < Path
- def type
- :transition
- end
-
- def transition?
- true
- end
-
- def production?
- false
- end
- end
-
- class ProductionPath < Path
- def type
- :production
- end
-
- def transition?
- false
- end
-
- def production?
- true
- end
- end
end
end
diff --git a/tool/lrama/lib/lrama/counterexamples/production_path.rb b/tool/lrama/lib/lrama/counterexamples/production_path.rb
new file mode 100644
index 0000000000..d7db688518
--- /dev/null
+++ b/tool/lrama/lib/lrama/counterexamples/production_path.rb
@@ -0,0 +1,17 @@
+module Lrama
+ class Counterexamples
+ class ProductionPath < Path
+ def type
+ :production
+ end
+
+ def transition?
+ false
+ end
+
+ def production?
+ true
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/counterexamples/start_path.rb b/tool/lrama/lib/lrama/counterexamples/start_path.rb
new file mode 100644
index 0000000000..4a6821cd0f
--- /dev/null
+++ b/tool/lrama/lib/lrama/counterexamples/start_path.rb
@@ -0,0 +1,21 @@
+module Lrama
+ class Counterexamples
+ class StartPath < Path
+ def initialize(to_state_item)
+ super nil, to_state_item
+ end
+
+ def type
+ :start
+ end
+
+ def transition?
+ false
+ end
+
+ def production?
+ false
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/counterexamples/transition_path.rb b/tool/lrama/lib/lrama/counterexamples/transition_path.rb
new file mode 100644
index 0000000000..96e611612a
--- /dev/null
+++ b/tool/lrama/lib/lrama/counterexamples/transition_path.rb
@@ -0,0 +1,17 @@
+module Lrama
+ class Counterexamples
+ class TransitionPath < Path
+ def type
+ :transition
+ end
+
+ def transition?
+ true
+ end
+
+ def production?
+ false
+ end
+ end
+ end
+end
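
The three Path subclasses extracted above differ only in their type tag and the two predicates. A minimal usage sketch, assuming the Path base class (path.rb above) takes from/to state items and letting `state_item` stand in for a Counterexamples::StateItem:

    start = Lrama::Counterexamples::StartPath.new(state_item)
    start.type         # => :start
    start.production?  # => false

    prod = Lrama::Counterexamples::ProductionPath.new(state_item, state_item)
    prod.type          # => :production
    prod.production?   # => true
    prod.transition?   # => false
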
diff --git a/tool/lrama/lib/lrama/grammar.rb b/tool/lrama/lib/lrama/grammar.rb
index f8f9994e08..9664caeb2b 100644
--- a/tool/lrama/lib/lrama/grammar.rb
+++ b/tool/lrama/lib/lrama/grammar.rb
@@ -1,13 +1,13 @@
-require "strscan"
-
require "lrama/grammar/auxiliary"
require "lrama/grammar/code"
+require "lrama/grammar/counter"
require "lrama/grammar/error_token"
require "lrama/grammar/percent_code"
require "lrama/grammar/precedence"
require "lrama/grammar/printer"
require "lrama/grammar/reference"
require "lrama/grammar/rule"
+require "lrama/grammar/rule_builder"
require "lrama/grammar/symbol"
require "lrama/grammar/union"
require "lrama/lexer"
@@ -21,17 +21,19 @@ module Lrama
:printers, :error_tokens,
:lex_param, :parse_param, :initial_action,
:symbols, :types,
- :rules, :_rules,
+ :rules, :rule_builders,
:sym_to_rules
- def initialize
+ def initialize(rule_counter)
+ @rule_counter = rule_counter
+
# Code defined by "%code"
@percent_codes = []
@printers = []
@error_tokens = []
@symbols = []
@types = []
- @_rules = []
+ @rule_builders = []
@rules = []
@sym_to_rules = {}
@empty_symbol = nil
@@ -48,12 +50,12 @@ module Lrama
@percent_codes << PercentCode.new(id, code)
end
- def add_printer(ident_or_tags:, code:, lineno:)
- @printers << Printer.new(ident_or_tags: ident_or_tags, code: code, lineno: lineno)
+ def add_printer(ident_or_tags:, token_code:, lineno:)
+ @printers << Printer.new(ident_or_tags: ident_or_tags, token_code: token_code, lineno: lineno)
end
- def add_error_token(ident_or_tags:, code:, lineno:)
- @error_tokens << ErrorToken.new(ident_or_tags: ident_or_tags, code: code, lineno: lineno)
+ def add_error_token(ident_or_tags:, token_code:, lineno:)
+ @error_tokens << ErrorToken.new(ident_or_tags: ident_or_tags, token_code: token_code, lineno: lineno)
end
def add_term(id:, alias_name: nil, tag: nil, token_id: nil, replace: false)
@@ -123,12 +125,8 @@ module Lrama
@union = Union.new(code: code, lineno: lineno)
end
- def add_rule(lhs:, rhs:, lineno:)
- @_rules << [lhs, rhs, lineno]
- end
-
- def build_code(type, token_code)
- Code.new(type: type, token_code: token_code)
+ def add_rule_builder(builder)
+ @rule_builders << builder
end
def prologue_first_lineno=(prologue_first_lineno)
@@ -148,7 +146,6 @@ module Lrama
end
def prepare
- extract_references
normalize_rules
collect_symbols
replace_token_with_symbol
@@ -159,14 +156,74 @@ module Lrama
fill_symbol_printer
fill_symbol_error_token
@symbols.sort_by!(&:number)
+ compute_nullable
+ compute_first_set
end
# TODO: More validation methods
+ #
+    # * Validation for no_declared_type_reference
def validate!
validate_symbol_number_uniqueness!
- validate_no_declared_type_reference!
+ validate_symbol_alias_name_uniqueness!
+ validate_rule_lhs_is_nterm!
+ end
+
+ def find_symbol_by_s_value(s_value)
+ @symbols.find do |sym|
+ sym.id.s_value == s_value
+ end
+ end
+
+ def find_symbol_by_s_value!(s_value)
+ find_symbol_by_s_value(s_value) || (raise "Symbol not found: #{s_value}")
+ end
+
+ def find_symbol_by_id(id)
+ @symbols.find do |sym|
+ sym.id == id || sym.alias_name == id.s_value
+ end
+ end
+
+ def find_symbol_by_id!(id)
+ find_symbol_by_id(id) || (raise "Symbol not found: #{id}")
+ end
+
+ def find_symbol_by_number!(number)
+ sym = @symbols[number]
+
+ raise "Symbol not found: #{number}" unless sym
+ raise "[BUG] Symbol number mismatch. #{number}, #{sym}" if sym.number != number
+
+ sym
+ end
+
+ def find_rules_by_symbol!(sym)
+ find_rules_by_symbol(sym) || (raise "Rules for #{sym} not found")
+ end
+
+ def find_rules_by_symbol(sym)
+ @sym_to_rules[sym.number]
+ end
+
+ def terms_count
+ terms.count
end
+ def terms
+ @terms ||= @symbols.select(&:term?)
+ end
+
+ def nterms_count
+ nterms.count
+ end
+
+ def nterms
+ @nterms ||= @symbols.select(&:nterm?)
+ end
+
+ private
+
def compute_nullable
@rules.each do |rule|
case
@@ -251,160 +308,9 @@ module Lrama
end
end
- def find_symbol_by_s_value(s_value)
- @symbols.find do |sym|
- sym.id.s_value == s_value
- end
- end
-
- def find_symbol_by_s_value!(s_value)
- find_symbol_by_s_value(s_value) || (raise "Symbol not found: #{s_value}")
- end
-
- def find_symbol_by_id(id)
- @symbols.find do |sym|
- # TODO: validate uniqueness of Token#s_value and Symbol#alias_name
- sym.id == id || sym.alias_name == id.s_value
- end
- end
-
- def find_symbol_by_id!(id)
- find_symbol_by_id(id) || (raise "Symbol not found: #{id}")
- end
-
- def find_symbol_by_number!(number)
- sym = @symbols[number]
-
- raise "Symbol not found: #{number}" unless sym
- raise "[BUG] Symbol number mismatch. #{number}, #{sym}" if sym.number != number
-
- sym
- end
-
- def find_rules_by_symbol!(sym)
- find_rules_by_symbol(sym) || (raise "Rules for #{sym} not found")
- end
-
- def find_rules_by_symbol(sym)
- @sym_to_rules[sym.number]
- end
-
- def terms_count
- terms.count
- end
-
- def terms
- @terms ||= @symbols.select(&:term?)
- end
-
- def nterms_count
- nterms.count
- end
-
- def nterms
- @nterms ||= @symbols.select(&:nterm?)
- end
-
- def scan_reference(scanner)
- start = scanner.pos
- case
- # $ references
- # It need to wrap an identifier with brackets to use ".-" for identifiers
- when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?\$/) # $$, $<long>$
- tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
- return Reference.new(type: :dollar, value: "$", ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?(\d+)/) # $1, $2, $<long>1
- tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
- return Reference.new(type: :dollar, value: Integer(scanner[2]), ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?([a-zA-Z_][a-zA-Z0-9_]*)/) # $foo, $expr, $<long>program (named reference without brackets)
- tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
- return Reference.new(type: :dollar, value: scanner[2], ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?\[([a-zA-Z_.][-a-zA-Z0-9_.]*)\]/) # $expr.right, $expr-right, $<long>program (named reference with brackets)
- tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
- return Reference.new(type: :dollar, value: scanner[2], ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
-
- # @ references
- # It need to wrap an identifier with brackets to use ".-" for identifiers
- when scanner.scan(/@\$/) # @$
- return Reference.new(type: :at, value: "$", first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/@(\d+)/) # @1
- return Reference.new(type: :at, value: Integer(scanner[1]), first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/@([a-zA-Z][a-zA-Z0-9_]*)/) # @foo, @expr (named reference without brackets)
- return Reference.new(type: :at, value: scanner[1], first_column: start, last_column: scanner.pos - 1)
- when scanner.scan(/@\[([a-zA-Z_.][-a-zA-Z0-9_.]*)\]/) # @expr.right, @expr-right (named reference with brackets)
- return Reference.new(type: :at, value: scanner[1], first_column: start, last_column: scanner.pos - 1)
- end
- end
-
- private
-
- def extract_references
- unless initial_action.nil?
- scanner = StringScanner.new(initial_action.s_value)
- references = []
-
- while !scanner.eos? do
- if reference = scan_reference(scanner)
- references << reference
- else
- scanner.getch
- end
- end
-
- initial_action.token_code.references = references
- end
-
- @printers.each do |printer|
- scanner = StringScanner.new(printer.code.s_value)
- references = []
-
- while !scanner.eos? do
- if reference = scan_reference(scanner)
- references << reference
- else
- scanner.getch
- end
- end
-
- printer.code.token_code.references = references
- end
-
- @error_tokens.each do |error_token|
- scanner = StringScanner.new(error_token.code.s_value)
- references = []
-
- while !scanner.eos? do
- if reference = scan_reference(scanner)
- references << reference
- else
- scanner.getch
- end
- end
-
- error_token.code.token_code.references = references
- end
-
- @_rules.each do |lhs, rhs, _|
- rhs.each_with_index do |token, index|
- next unless token.class == Lrama::Lexer::Token::UserCode
-
- scanner = StringScanner.new(token.s_value)
- references = []
-
- while !scanner.eos? do
- case
- when reference = scan_reference(scanner)
- references << reference
- when scanner.scan(/\/\*/)
- scanner.scan_until(/\*\//)
- else
- scanner.getch
- end
- end
-
- token.references = references
- numberize_references(lhs, rhs, token.references)
- end
+ def setup_rules
+ @rule_builders.each do |builder|
+ builder.setup_rules
end
end
@@ -444,35 +350,9 @@ module Lrama
@accept_symbol = term
end
- def numberize_references(lhs, rhs, references)
- references.map! {|ref|
- ref_name = ref.value
- if ref_name.is_a?(::String) && ref_name != '$'
- value =
- if lhs.referred_by?(ref_name)
- '$'
- else
- index = rhs.find_index {|token| token.referred_by?(ref_name) }
-
- if index
- index + 1
- else
- raise "'#{ref_name}' is invalid name."
- end
- end
-
- ref.value = value
- ref
- else
- ref
- end
- }
- end
-
# 1. Add $accept rule to the top of rules
- # 2. Extract precedence and last action
- # 3. Extract action in the middle of RHS into new Empty rule
- # 4. Append id and extract action then create Rule
+ # 2. Extract action in the middle of RHS into new Empty rule
+ # 3. Append id and extract action then create Rule
#
# Bison 3.8.2 uses different orders for symbol number and rule number
# when a rule has actions in the middle of a rule.
@@ -493,127 +373,38 @@ module Lrama
#
def normalize_rules
# 1. Add $accept rule to the top of rules
- accept = find_symbol_by_s_value!("$accept")
- eof = find_symbol_by_number!(0)
- lineno = @_rules.first ? @_rules.first[2] : 0
- @rules << Rule.new(id: @rules.count, lhs: accept, rhs: [@_rules.first[0], eof], code: nil, lineno: lineno)
-
- extracted_action_number = 1 # @n as nterm
+ accept = @accept_symbol
+ eof = @eof_symbol
+ lineno = @rule_builders.first ? @rule_builders.first.line : 0
+ @rules << Rule.new(id: @rule_counter.increment, _lhs: accept.id, _rhs: [@rule_builders.first.lhs, eof.id], token_code: nil, lineno: lineno)
- @_rules.each do |lhs, rhs, lineno|
- a = []
- rhs1 = []
- code = nil
- precedence_sym = nil
-
- # 2. Extract precedence and last action
- rhs.reverse.each do |r|
- case
- when r.is_a?(Symbol) # precedence_sym
- precedence_sym = r
- when r.is_a?(Lrama::Lexer::Token::UserCode) && precedence_sym.nil? && code.nil? && rhs1.empty?
- code = r
- else
- rhs1 << r
- end
- end
- rhs1.reverse!
-
- # Bison n'th component is 1-origin
- (rhs1 + [code]).compact.each.with_index(1) do |token, i|
- if token.is_a?(Lrama::Lexer::Token::UserCode)
- token.references.each do |ref|
- # Need to keep position_in_rhs for actions in the middle of RHS
- ref.position_in_rhs = i - 1
- next if ref.type == :at
- # $$, $n, @$, @n can be used in any actions
-
- if ref.value == "$"
- # TODO: Should be postponed after middle actions are extracted?
- ref.referring_symbol = lhs
- elsif ref.value.is_a?(Integer)
- raise "Can not refer following component. #{ref.value} >= #{i}. #{token}" if ref.value >= i
- rhs1[ref.value - 1].referred = true
- ref.referring_symbol = rhs1[ref.value - 1]
- elsif ref.value.is_a?(String)
- target_tokens = ([lhs] + rhs1 + [code]).compact.first(i)
- referring_symbol_candidate = target_tokens.filter {|token| token.referred_by?(ref.value) }
- raise "Referring symbol `#{ref.value}` is duplicated. #{token}" if referring_symbol_candidate.size >= 2
- raise "Referring symbol `#{ref.value}` is not found. #{token}" if referring_symbol_candidate.count == 0
-
- referring_symbol = referring_symbol_candidate.first
- referring_symbol.referred = true
- ref.referring_symbol = referring_symbol
- end
- end
- end
- end
+ setup_rules
- rhs2 = rhs1.map do |token|
- if token.is_a?(Lrama::Lexer::Token::UserCode)
- prefix = token.referred ? "@" : "$@"
- new_token = Lrama::Lexer::Token::Ident.new(s_value: prefix + extracted_action_number.to_s)
- extracted_action_number += 1
- a << [new_token, token]
- new_token
- else
- token
- end
+ @rule_builders.each do |builder|
+ # Extract actions in the middle of RHS into new rules.
+ builder.midrule_action_rules.each do |rule|
+ @rules << rule
end
- # Extract actions in the middle of RHS
- # into new rules.
- a.each do |new_token, code|
- @rules << Rule.new(id: @rules.count, lhs: new_token, rhs: [], code: Code.new(type: :user_code, token_code: code), lineno: code.line)
+ builder.rules.each do |rule|
+ add_nterm(id: rule._lhs)
+ @rules << rule
end
- c = code ? Code.new(type: :user_code, token_code: code) : nil
- # Expand Parameterizing rules
- if rhs2.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) }
- expand_parameterizing_rules(lhs, rhs2, c, precedence_sym, lineno)
- else
- @rules << Rule.new(id: @rules.count, lhs: lhs, rhs: rhs2, code: c, precedence_sym: precedence_sym, lineno: lineno)
- end
- add_nterm(id: lhs)
- a.each do |new_token, _|
- add_nterm(id: new_token)
+ builder.midrule_action_rules.each do |rule|
+ add_nterm(id: rule._lhs)
end
end
end
- def expand_parameterizing_rules(lhs, rhs, code, precedence_sym, lineno)
- token = Lrama::Lexer::Token::Ident.new(s_value: rhs[0].s_value)
- if rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.option? }
- option_token = Lrama::Lexer::Token::Ident.new(s_value: "option_#{rhs[0].s_value}")
- add_term(id: option_token)
- @rules << Rule.new(id: @rules.count, lhs: lhs, rhs: [option_token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: option_token, rhs: [], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: option_token, rhs: [token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.nonempty_list? }
- nonempty_list_token = Lrama::Lexer::Token::Ident.new(s_value: "nonempty_list_#{rhs[0].s_value}")
- add_term(id: nonempty_list_token)
- @rules << Rule.new(id: @rules.count, lhs: lhs, rhs: [nonempty_list_token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: nonempty_list_token, rhs: [token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: nonempty_list_token, rhs: [nonempty_list_token, token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- elsif rhs.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) && r.list? }
- list_token = Lrama::Lexer::Token::Ident.new(s_value: "list_#{rhs[0].s_value}")
- add_term(id: list_token)
- @rules << Rule.new(id: @rules.count, lhs: lhs, rhs: [list_token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: list_token, rhs: [], code: code, precedence_sym: precedence_sym, lineno: lineno)
- @rules << Rule.new(id: @rules.count, lhs: list_token, rhs: [list_token, token], code: code, precedence_sym: precedence_sym, lineno: lineno)
- end
- end
-
# Collect symbols from rules
def collect_symbols
- @rules.flat_map(&:rhs).each do |s|
+ @rules.flat_map(&:_rhs).each do |s|
case s
when Lrama::Lexer::Token::Char
add_term(id: s)
when Lrama::Lexer::Token
# skip
- when Symbol
- # skip
else
raise "Unknown class: #{s}"
end
@@ -695,21 +486,11 @@ module Lrama
def replace_token_with_symbol
@rules.each do |rule|
- rule.lhs = token_to_symbol(rule.lhs)
+ rule.lhs = token_to_symbol(rule._lhs) if rule._lhs
- rule.rhs.map! do |t|
+ rule.rhs = rule._rhs.map do |t|
token_to_symbol(t)
end
-
- if rule.code
- rule.code.references.each do |ref|
- next if ref.type == :at
-
- if !ref.referring_symbol.is_a?(Lrama::Lexer::Token::UserCode)
- ref.referring_symbol = token_to_symbol(ref.referring_symbol)
- end
- end
- end
end
end
@@ -717,8 +498,6 @@ module Lrama
case token
when Lrama::Lexer::Token
find_symbol_by_id!(token)
- when Symbol
- token
else
raise "Unknown class: #{token}"
end
@@ -801,17 +580,23 @@ module Lrama
raise "Symbol number is duplicated. #{invalid}"
end
- def validate_no_declared_type_reference!
+ def validate_symbol_alias_name_uniqueness!
+ invalid = @symbols.select(&:alias_name).group_by(&:alias_name).select do |alias_name, syms|
+ syms.count > 1
+ end
+
+ return if invalid.empty?
+
+ raise "Symbol alias name is duplicated. #{invalid}"
+ end
+
+ def validate_rule_lhs_is_nterm!
errors = []
rules.each do |rule|
- next unless rule.code
+ next if rule.lhs.nterm?
- rule.code.references.select do |ref|
- ref.type == :dollar && !ref.tag
- end.each do |ref|
- errors << "$#{ref.value} of '#{rule.lhs.id.s_value}' has no declared type"
- end
+ errors << "[BUG] LHS of #{rule} (line: #{rule.lineno}) is term. It should be nterm."
end
return if errors.empty?
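
The reworked Grammar takes a rule counter up front and collects RuleBuilder objects instead of raw [lhs, rhs, lineno] triples; reference extraction moved into the lexer tokens, and nullable/first-set computation moved inside #prepare. A rough sketch of the call sequence the parser is expected to follow (builder construction abbreviated; see parser.rb below):

    rule_counter = Lrama::Grammar::Counter.new(0)
    grammar = Lrama::Grammar.new(rule_counter)

    # ...declarations and rules are fed in while parsing...
    grammar.add_rule_builder(builder)   # one RuleBuilder per grammar rule

    grammar.prepare    # normalize_rules, collect_symbols, ..., compute_nullable, compute_first_set
    grammar.validate!  # symbol number/alias uniqueness, rule-LHS-is-nterm check
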
diff --git a/tool/lrama/lib/lrama/grammar/code.rb b/tool/lrama/lib/lrama/grammar/code.rb
index 712cb1ad5a..f5c9d0f701 100644
--- a/tool/lrama/lib/lrama/grammar/code.rb
+++ b/tool/lrama/lib/lrama/grammar/code.rb
@@ -7,116 +7,32 @@ module Lrama
def_delegators "token_code", :s_value, :line, :column, :references
- # $$, $n, @$, @n is translated to C code
+ # $$, $n, @$, @n are translated to C code
def translated_code
- case type
- when :user_code
- translated_user_code
- when :initial_action
- translated_initial_action_code
- end
- end
-
- # * ($1) error
- # * ($$) *yyvaluep
- # * (@1) error
- # * (@$) *yylocationp
- def translated_printer_code(tag)
t_code = s_value.dup
references.reverse.each do |ref|
first_column = ref.first_column
last_column = ref.last_column
- case
- when ref.value == "$" && ref.type == :dollar # $$
- # Omit "<>"
- member = tag.s_value[1..-2]
- str = "((*yyvaluep).#{member})"
- when ref.value == "$" && ref.type == :at # @$
- str = "(*yylocationp)"
- when ref.type == :dollar # $n
- raise "$#{ref.value} can not be used in %printer."
- when ref.type == :at # @n
- raise "@#{ref.value} can not be used in %printer."
- else
- raise "Unexpected. #{self}, #{ref}"
- end
+ str = reference_to_c(ref)
t_code[first_column..last_column] = str
end
return t_code
end
- alias :translated_error_token_code :translated_printer_code
private
- # * ($1) yyvsp[i]
- # * ($$) yyval
- # * (@1) yylsp[i]
- # * (@$) yyloc
- def translated_user_code
- t_code = s_value.dup
-
- references.reverse.each do |ref|
- first_column = ref.first_column
- last_column = ref.last_column
-
- case
- when ref.value == "$" && ref.type == :dollar # $$
- # Omit "<>"
- member = ref.tag.s_value[1..-2]
- str = "(yyval.#{member})"
- when ref.value == "$" && ref.type == :at # @$
- str = "(yyloc)"
- when ref.type == :dollar # $n
- i = -ref.position_in_rhs + ref.value
- # Omit "<>"
- member = ref.tag.s_value[1..-2]
- str = "(yyvsp[#{i}].#{member})"
- when ref.type == :at # @n
- i = -ref.position_in_rhs + ref.value
- str = "(yylsp[#{i}])"
- else
- raise "Unexpected. #{self}, #{ref}"
- end
-
- t_code[first_column..last_column] = str
- end
-
- return t_code
- end
-
- # * ($1) error
- # * ($$) yylval
- # * (@1) error
- # * (@$) yylloc
- def translated_initial_action_code
- t_code = s_value.dup
-
- references.reverse.each do |ref|
- first_column = ref.first_column
- last_column = ref.last_column
-
- case
- when ref.value == "$" && ref.type == :dollar # $$
- str = "yylval"
- when ref.value == "$" && ref.type == :at # @$
- str = "yylloc"
- when ref.type == :dollar # $n
- raise "$#{ref.value} can not be used in initial_action."
- when ref.type == :at # @n
- raise "@#{ref.value} can not be used in initial_action."
- else
- raise "Unexpected. #{self}, #{ref}"
- end
-
- t_code[first_column..last_column] = str
- end
-
- return t_code
+ def reference_to_c(ref)
+ raise NotImplementedError.new("#reference_to_c is not implemented")
end
end
end
end
+
+require "lrama/grammar/code/initial_action_code"
+require "lrama/grammar/code/no_reference_code"
+require "lrama/grammar/code/printer_code"
+require "lrama/grammar/code/rule_action"
diff --git a/tool/lrama/lib/lrama/grammar/code/initial_action_code.rb b/tool/lrama/lib/lrama/grammar/code/initial_action_code.rb
new file mode 100644
index 0000000000..2b064f271e
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/code/initial_action_code.rb
@@ -0,0 +1,28 @@
+module Lrama
+ class Grammar
+ class Code
+ class InitialActionCode < Code
+ private
+
+ # * ($$) yylval
+ # * (@$) yylloc
+ # * ($1) error
+ # * (@1) error
+ def reference_to_c(ref)
+ case
+ when ref.type == :dollar && ref.name == "$" # $$
+ "yylval"
+ when ref.type == :at && ref.name == "$" # @$
+ "yylloc"
+ when ref.type == :dollar # $n
+ raise "$#{ref.value} can not be used in initial_action."
+ when ref.type == :at # @n
+ raise "@#{ref.value} can not be used in initial_action."
+ else
+ raise "Unexpected. #{self}, #{ref}"
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/code/no_reference_code.rb b/tool/lrama/lib/lrama/grammar/code/no_reference_code.rb
new file mode 100644
index 0000000000..ac6cdb8fba
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/code/no_reference_code.rb
@@ -0,0 +1,24 @@
+module Lrama
+ class Grammar
+ class Code
+ class NoReferenceCode < Code
+ private
+
+ # * ($$) error
+ # * (@$) error
+ # * ($1) error
+ # * (@1) error
+ def reference_to_c(ref)
+ case
+ when ref.type == :dollar # $$, $n
+ raise "$#{ref.value} can not be used in #{type}."
+ when ref.type == :at # @$, @n
+ raise "@#{ref.value} can not be used in #{type}."
+ else
+ raise "Unexpected. #{self}, #{ref}"
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/code/printer_code.rb b/tool/lrama/lib/lrama/grammar/code/printer_code.rb
new file mode 100644
index 0000000000..a19646adda
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/code/printer_code.rb
@@ -0,0 +1,34 @@
+module Lrama
+ class Grammar
+ class Code
+ class PrinterCode < Code
+ def initialize(type: nil, token_code: nil, tag: nil)
+ super(type: type, token_code: token_code)
+ @tag = tag
+ end
+
+ private
+
+ # * ($$) *yyvaluep
+ # * (@$) *yylocationp
+ # * ($1) error
+ # * (@1) error
+ def reference_to_c(ref)
+ case
+ when ref.type == :dollar && ref.name == "$" # $$
+ member = @tag.member
+ "((*yyvaluep).#{member})"
+ when ref.type == :at && ref.name == "$" # @$
+ "(*yylocationp)"
+ when ref.type == :dollar # $n
+ raise "$#{ref.value} can not be used in #{type}."
+ when ref.type == :at # @n
+ raise "@#{ref.value} can not be used in #{type}."
+ else
+ raise "Unexpected. #{self}, #{ref}"
+ end
+ end
+ end
+ end
+ end
+end
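
Putting the pieces together, a %printer body is translated roughly as follows (a sketch assuming a `<val>` tag; Printer itself is updated further down in this diff to build a PrinterCode):

    tag  = Lrama::Lexer::Token::Tag.new(s_value: "<val>")
    code = Lrama::Lexer::Token::UserCode.new(s_value: 'printf("%d", $$);')

    printer = Lrama::Grammar::Printer.new(ident_or_tags: [tag], token_code: code, lineno: 1)
    printer.translated_code(tag)
    # => 'printf("%d", ((*yyvaluep).val));'   ($$ becomes (*yyvaluep).val via Tag#member)
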
diff --git a/tool/lrama/lib/lrama/grammar/code/rule_action.rb b/tool/lrama/lib/lrama/grammar/code/rule_action.rb
new file mode 100644
index 0000000000..72afb62303
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/code/rule_action.rb
@@ -0,0 +1,62 @@
+module Lrama
+ class Grammar
+ class Code
+ class RuleAction < Code
+ def initialize(type: nil, token_code: nil, rule: nil)
+ super(type: type, token_code: token_code)
+ @rule = rule
+ end
+
+ private
+
+ # * ($$) yyval
+ # * (@$) yyloc
+ # * ($1) yyvsp[i]
+ # * (@1) yylsp[i]
+ #
+ # "Rule" class: keyword_class { $1 } tSTRING { $2 + $3 } keyword_end { $class = $1 + $keyword_end }
+ # "Position in grammar" $1 $2 $3 $4 $5 $6
+ # "Index for yyvsp" -4 -3 -2 -1 0
+ def reference_to_c(ref)
+ case
+ when ref.type == :dollar && ref.name == "$" # $$
+ tag = ref.ex_tag || lhs.tag
+ raise_tag_not_found_error(ref) unless tag
+ "(yyval.#{tag.member})"
+ when ref.type == :at && ref.name == "$" # @$
+ "(yyloc)"
+ when ref.type == :dollar # $n
+ i = -position_in_rhs + ref.index
+ tag = ref.ex_tag || rhs[ref.index - 1].tag
+ raise_tag_not_found_error(ref) unless tag
+ "(yyvsp[#{i}].#{tag.member})"
+ when ref.type == :at # @n
+ i = -position_in_rhs + ref.index
+ "(yylsp[#{i}])"
+ else
+ raise "Unexpected. #{self}, #{ref}"
+ end
+ end
+
+ def position_in_rhs
+        # If the rule is not a derived rule, its user code is only the action
+        # at the end of the RHS. In that case, the action is located at
+        # `@rule.rhs.count`.
+ @rule.position_in_original_rule_rhs || @rule.rhs.count
+ end
+
+ def rhs
+ (@rule.original_rule || @rule).rhs
+ end
+
+ def lhs
+ (@rule.original_rule || @rule).lhs
+ end
+
+ def raise_tag_not_found_error(ref)
+ raise "Tag is not specified for '$#{ref.value}' in '#{@rule.to_s}'"
+ end
+ end
+ end
+ end
+end
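
As a concrete check of the index arithmetic in the comment above: for the final action of that rule the RHS has five components (keyword_class, the extracted $@1, tSTRING, the extracted $@2, keyword_end), so `position_in_rhs` falls back to `rhs.count` = 5 and `i = -position_in_rhs + ref.index` gives:

    # $n -> yyvsp offset, with position_in_rhs = 5
    (1..5).map {|n| [n, -5 + n] }
    # => [[1, -4], [2, -3], [3, -2], [4, -1], [5, 0]]
    # i.e. $1 reads yyvsp[-4] ... $5 reads yyvsp[0], matching the table above.
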
diff --git a/tool/lrama/lib/lrama/grammar/counter.rb b/tool/lrama/lib/lrama/grammar/counter.rb
new file mode 100644
index 0000000000..c13f4ec3e3
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/counter.rb
@@ -0,0 +1,15 @@
+module Lrama
+ class Grammar
+ class Counter
+ def initialize(number)
+ @number = number
+ end
+
+ def increment
+ n = @number
+ @number += 1
+ n
+ end
+ end
+ end
+end
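
Counter is a post-increment wrapper: it returns the current number and then bumps it, which keeps rule ids and midrule-action names globally sequential across RuleBuilder instances. Expected behavior, for reference:

    rule_counter = Lrama::Grammar::Counter.new(0)
    rule_counter.increment # => 0
    rule_counter.increment # => 1

    # parser.rb below starts the midrule-action counter at 1,
    # so the first generated name is "$@1" (or "@1").
    midrule_action_counter = Lrama::Grammar::Counter.new(1)
    midrule_action_counter.increment # => 1
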
diff --git a/tool/lrama/lib/lrama/grammar/error_token.rb b/tool/lrama/lib/lrama/grammar/error_token.rb
index de82523577..8efde7df33 100644
--- a/tool/lrama/lib/lrama/grammar/error_token.rb
+++ b/tool/lrama/lib/lrama/grammar/error_token.rb
@@ -1,8 +1,8 @@
module Lrama
class Grammar
- class ErrorToken < Struct.new(:ident_or_tags, :code, :lineno, keyword_init: true)
- def translated_code(member)
- code.translated_error_token_code(member)
+ class ErrorToken < Struct.new(:ident_or_tags, :token_code, :lineno, keyword_init: true)
+ def translated_code(tag)
+ Code::PrinterCode.new(type: :error_token, token_code: token_code, tag: tag).translated_code
end
end
end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder.rb
new file mode 100644
index 0000000000..28c9ad427d
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder.rb
@@ -0,0 +1,43 @@
+require 'lrama/grammar/parameterizing_rules/builder/base'
+require 'lrama/grammar/parameterizing_rules/builder/list'
+require 'lrama/grammar/parameterizing_rules/builder/nonempty_list'
+require 'lrama/grammar/parameterizing_rules/builder/option'
+require 'lrama/grammar/parameterizing_rules/builder/separated_nonempty_list'
+require 'lrama/grammar/parameterizing_rules/builder/separated_list'
+
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ RULES = {
+ option: Lrama::Grammar::ParameterizingRules::Builder::Option,
+ "?": Lrama::Grammar::ParameterizingRules::Builder::Option,
+ nonempty_list: Lrama::Grammar::ParameterizingRules::Builder::NonemptyList,
+ "+": Lrama::Grammar::ParameterizingRules::Builder::NonemptyList,
+ list: Lrama::Grammar::ParameterizingRules::Builder::List,
+ "*": Lrama::Grammar::ParameterizingRules::Builder::List,
+ separated_nonempty_list: Lrama::Grammar::ParameterizingRules::Builder::SeparatedNonemptyList,
+ separated_list: Lrama::Grammar::ParameterizingRules::Builder::SeparatedList,
+ }
+
+ def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+ @token = token
+ @key = token.s_value.to_sym
+ @rule_counter = rule_counter
+ @lhs = lhs
+ @user_code = user_code
+ @precedence_sym = precedence_sym
+ @line = line
+ end
+
+ def build
+ if RULES.key?(@key)
+ RULES[@key].new(@token, @rule_counter, @lhs, @user_code, @precedence_sym, @line).build
+ else
+ raise "Parameterizing rule does not exist. `#{@key}`"
+ end
+ end
+ end
+ end
+ end
+end
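
The RULES table maps both the long forms (option, list, nonempty_list, ...) and the shorthand symbols (?, *, +) to a builder class, and each builder returns plain Rule objects. For instance, per option.rb below, a rule like `args: value?` expands to roughly the following (a sketch with placeholder variable names):

    # args         : option_value
    # option_value : /* empty */
    # option_value : value
    builder = Lrama::Grammar::ParameterizingRules::Builder.new(
      parameterizing_token,  # Token::Parameterizing with s_value "?" and args [value_token]
      rule_counter, lhs_token, user_code, precedence_sym, lineno
    )
    builder.build  # => three Rule objects with the _lhs/_rhs sketched above
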
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/base.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/base.rb
new file mode 100644
index 0000000000..a85348c94f
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/base.rb
@@ -0,0 +1,28 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class Base
+ def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+ @args = token.args
+ @token = @args.first
+ @rule_counter = rule_counter
+ @lhs = lhs
+ @user_code = user_code
+ @precedence_sym = precedence_sym
+ @line = line
+ @expected_argument_num = 1
+ end
+
+ private
+
+ def validate_argument_number!
+ unless @args.count == @expected_argument_num
+ raise "Invalid number of arguments. expect: #{@expected_argument_num} actual: #{@args.count}"
+ end
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/list.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/list.rb
new file mode 100644
index 0000000000..f814160416
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/list.rb
@@ -0,0 +1,20 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class List < Base
+ def build
+ validate_argument_number!
+
+ rules = []
+ list_token = Lrama::Lexer::Token::Ident.new(s_value: "list_#{@token.s_value}")
+ rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: list_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: list_token, _rhs: [list_token, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb
new file mode 100644
index 0000000000..142d6c156b
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/nonempty_list.rb
@@ -0,0 +1,20 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class NonemptyList < Base
+ def build
+ validate_argument_number!
+
+ rules = []
+ nonempty_list_token = Lrama::Lexer::Token::Ident.new(s_value: "nonempty_list_#{@token.s_value}")
+ rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [nonempty_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: nonempty_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: nonempty_list_token, _rhs: [nonempty_list_token, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/option.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/option.rb
new file mode 100644
index 0000000000..f751609e44
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/option.rb
@@ -0,0 +1,20 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class Option < Base
+ def build
+ validate_argument_number!
+
+ rules = []
+ option_token = Lrama::Lexer::Token::Ident.new(s_value: "option_#{@token.s_value}")
+ rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [option_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: option_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+            rules << Rule.new(id: @rule_counter.increment, _lhs: option_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb
new file mode 100644
index 0000000000..95f8156498
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_list.rb
@@ -0,0 +1,28 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class SeparatedList < Base
+ def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+ super
+ @separator = @args[0]
+ @token = @args[1]
+ @expected_argument_num = 2
+ end
+
+ def build
+ validate_argument_number!
+
+ rules = []
+ separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_list_#{@token.s_value}")
+ rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [separated_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [separated_list_token, @separator, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb
new file mode 100644
index 0000000000..64662180a0
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/parameterizing_rules/builder/separated_nonempty_list.rb
@@ -0,0 +1,27 @@
+module Lrama
+ class Grammar
+ class ParameterizingRules
+ class Builder
+ class SeparatedNonemptyList < Base
+ def initialize(token, rule_counter, lhs, user_code, precedence_sym, line)
+ super
+ @separator = @args[0]
+ @token = @args[1]
+ @expected_argument_num = 2
+ end
+
+ def build
+ validate_argument_number!
+
+ rules = []
+ separated_list_token = Lrama::Lexer::Token::Ident.new(s_value: "separated_nonempty_list_#{@token.s_value}")
+ rules << Rule.new(id: @rule_counter.increment, _lhs: @lhs, _rhs: [separated_list_token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [@token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules << Rule.new(id: @rule_counter.increment, _lhs: separated_list_token, _rhs: [separated_list_token, @separator, @token], token_code: @user_code, precedence_sym: @precedence_sym, lineno: @line)
+ rules
+ end
+ end
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/grammar/printer.rb b/tool/lrama/lib/lrama/grammar/printer.rb
index da49463485..8984a96e1a 100644
--- a/tool/lrama/lib/lrama/grammar/printer.rb
+++ b/tool/lrama/lib/lrama/grammar/printer.rb
@@ -1,8 +1,8 @@
module Lrama
class Grammar
- class Printer < Struct.new(:ident_or_tags, :code, :lineno, keyword_init: true)
- def translated_code(member)
- code.translated_printer_code(member)
+ class Printer < Struct.new(:ident_or_tags, :token_code, :lineno, keyword_init: true)
+ def translated_code(tag)
+ Code::PrinterCode.new(type: :printer, token_code: token_code, tag: tag).translated_code
end
end
end
diff --git a/tool/lrama/lib/lrama/grammar/reference.rb b/tool/lrama/lib/lrama/grammar/reference.rb
index bc178e104d..24c981298e 100644
--- a/tool/lrama/lib/lrama/grammar/reference.rb
+++ b/tool/lrama/lib/lrama/grammar/reference.rb
@@ -1,21 +1,12 @@
-# type: :dollar or :at
-# ex_tag: "$<tag>1" (Optional)
-
module Lrama
class Grammar
- class Reference < Struct.new(:type, :value, :ex_tag, :first_column, :last_column, :referring_symbol, :position_in_rhs, keyword_init: true)
- def tag
- if ex_tag
- ex_tag
- else
- # FIXME: Remove this class check
- if referring_symbol.is_a?(Symbol)
- referring_symbol.tag
- else
- # Lrama::Lexer::Token (User_code) case
- nil
- end
- end
+ # type: :dollar or :at
+ # name: String (e.g. $$, $foo, $expr.right)
+ # index: Integer (e.g. $1)
+ # ex_tag: "$<tag>1" (Optional)
+ class Reference < Struct.new(:type, :name, :index, :ex_tag, :first_column, :last_column, keyword_init: true)
+ def value
+ name || index
end
end
end
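
Reference now keeps named and numbered references in separate fields (`name` vs `index`); `value` remains as a convenience accessor, mainly for error messages. For example:

    Lrama::Grammar::Reference.new(type: :dollar, name: "$").value    # => "$"    ($$)
    Lrama::Grammar::Reference.new(type: :dollar, index: 1).value     # => 1      ($1)
    Lrama::Grammar::Reference.new(type: :at,     name: "foo").value  # => "foo"  (@foo)
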
diff --git a/tool/lrama/lib/lrama/grammar/rule.rb b/tool/lrama/lib/lrama/grammar/rule.rb
index c559388b62..13b44c009f 100644
--- a/tool/lrama/lib/lrama/grammar/rule.rb
+++ b/tool/lrama/lib/lrama/grammar/rule.rb
@@ -1,6 +1,20 @@
module Lrama
class Grammar
- class Rule < Struct.new(:id, :lhs, :rhs, :code, :nullable, :precedence_sym, :lineno, keyword_init: true)
+ # _rhs holds original RHS element. Use rhs to refer to Symbol.
+ class Rule < Struct.new(:id, :_lhs, :lhs, :_rhs, :rhs, :token_code, :position_in_original_rule_rhs, :nullable, :precedence_sym, :lineno, keyword_init: true)
+ attr_accessor :original_rule
+
+ def ==(other)
+ self.class == other.class &&
+ self.lhs == other.lhs &&
+ self.rhs == other.rhs &&
+ self.token_code == other.token_code &&
+ self.position_in_original_rule_rhs == other.position_in_original_rule_rhs &&
+ self.nullable == other.nullable &&
+ self.precedence_sym == other.precedence_sym &&
+ self.lineno == other.lineno
+ end
+
# TODO: Change this to display_name
def to_s
l = lhs.id.s_value
@@ -32,7 +46,9 @@ module Lrama
end
def translated_code
- code&.translated_code
+ return nil unless token_code
+
+ Code::RuleAction.new(type: :rule_action, token_code: token_code, rule: self).translated_code
end
end
end
diff --git a/tool/lrama/lib/lrama/grammar/rule_builder.rb b/tool/lrama/lib/lrama/grammar/rule_builder.rb
new file mode 100644
index 0000000000..80cd0c2ca3
--- /dev/null
+++ b/tool/lrama/lib/lrama/grammar/rule_builder.rb
@@ -0,0 +1,179 @@
+require 'lrama/grammar/parameterizing_rules/builder'
+
+module Lrama
+ class Grammar
+ class RuleBuilder
+ attr_accessor :lhs, :line
+ attr_reader :rhs, :user_code, :precedence_sym
+
+ def initialize(rule_counter, midrule_action_counter, position_in_original_rule_rhs = nil, skip_preprocess_references: false)
+ @rule_counter = rule_counter
+ @midrule_action_counter = midrule_action_counter
+ @position_in_original_rule_rhs = position_in_original_rule_rhs
+ @skip_preprocess_references = skip_preprocess_references
+
+ @lhs = nil
+ @rhs = []
+ @user_code = nil
+ @precedence_sym = nil
+ @line = nil
+ @rule_builders_for_derived_rules = []
+ end
+
+ def add_rhs(rhs)
+ if !@line
+ @line = rhs.line
+ end
+
+ flush_user_code
+
+ @rhs << rhs
+ end
+
+ def user_code=(user_code)
+ if !@line
+ @line = user_code.line
+ end
+
+ flush_user_code
+
+ @user_code = user_code
+ end
+
+ def precedence_sym=(precedence_sym)
+ flush_user_code
+
+ @precedence_sym = precedence_sym
+ end
+
+ def complete_input
+ freeze_rhs
+ end
+
+ def setup_rules
+ preprocess_references unless @skip_preprocess_references
+ process_rhs
+ build_rules
+ end
+
+ def parameterizing_rules
+ @parameterizing_rules
+ end
+
+ def midrule_action_rules
+ @midrule_action_rules
+ end
+
+ def rules
+ @rules
+ end
+
+ private
+
+ def freeze_rhs
+ @rhs.freeze
+ end
+
+ def preprocess_references
+ numberize_references
+ end
+
+ def build_rules
+ tokens = @replaced_rhs
+
+ # Expand Parameterizing rules
+ if tokens.any? {|r| r.is_a?(Lrama::Lexer::Token::Parameterizing) }
+ @rules = @parameterizing_rules
+ @midrule_action_rules = []
+ else
+ rule = Rule.new(
+ id: @rule_counter.increment, _lhs: lhs, _rhs: tokens, token_code: user_code,
+ position_in_original_rule_rhs: @position_in_original_rule_rhs, precedence_sym: precedence_sym, lineno: line
+ )
+ @rules = [rule]
+ @midrule_action_rules = @rule_builders_for_derived_rules.map do |rule_builder|
+ rule_builder.rules
+ end.flatten
+ @midrule_action_rules.each do |r|
+ r.original_rule = rule
+ end
+ end
+ end
+
+      # rhs is a mixture of various token types, e.g. `Ident`, `Parameterizing`, `UserCode`, and so on.
+      # `#process_rhs` replaces some kinds of tokens with `Ident` so that `@replaced_rhs` contains only `Ident` or `Char` tokens.
+ def process_rhs
+ return if @replaced_rhs
+
+ @replaced_rhs = []
+ @parameterizing_rules = []
+
+ rhs.each_with_index do |token, i|
+ case token
+ when Lrama::Lexer::Token::Char
+ @replaced_rhs << token
+ when Lrama::Lexer::Token::Ident
+ @replaced_rhs << token
+ when Lrama::Lexer::Token::Parameterizing
+ @parameterizing_rules = ParameterizingRules::Builder.new(token, @rule_counter, lhs, user_code, precedence_sym, line).build
+ @replaced_rhs << token
+ when Lrama::Lexer::Token::UserCode
+ prefix = token.referred ? "@" : "$@"
+ new_token = Lrama::Lexer::Token::Ident.new(s_value: prefix + @midrule_action_counter.increment.to_s)
+ @replaced_rhs << new_token
+
+ rule_builder = RuleBuilder.new(@rule_counter, @midrule_action_counter, i, skip_preprocess_references: true)
+ rule_builder.lhs = new_token
+ rule_builder.user_code = token
+ rule_builder.complete_input
+ rule_builder.setup_rules
+
+ @rule_builders_for_derived_rules << rule_builder
+ else
+ raise "Unexpected token. #{token}"
+ end
+ end
+ end
+
+ def numberize_references
+ # Bison n'th component is 1-origin
+ (rhs + [user_code]).compact.each.with_index(1) do |token, i|
+ next unless token.is_a?(Lrama::Lexer::Token::UserCode)
+
+ token.references.each do |ref|
+ ref_name = ref.name
+ if ref_name && ref_name != '$'
+ if lhs.referred_by?(ref_name)
+ ref.name = '$'
+ else
+ candidates = rhs.each_with_index.select {|token, i| token.referred_by?(ref_name) }
+
+ raise "Referring symbol `#{ref_name}` is duplicated. #{token}" if candidates.size >= 2
+ raise "Referring symbol `#{ref_name}` is not found. #{token}" unless referring_symbol = candidates.first
+
+ ref.index = referring_symbol[1] + 1
+ end
+ end
+
+ # TODO: Need to check index of @ too?
+ next if ref.type == :at
+
+ if ref.index
+            # TODO: Prohibit $0 even though Bison allows it?
+ # See: https://www.gnu.org/software/bison/manual/html_node/Actions.html
+ raise "Can not refer following component. #{ref.index} >= #{i}. #{token}" if ref.index >= i
+ rhs[ref.index - 1].referred = true
+ end
+ end
+ end
+ end
+
+ def flush_user_code
+ if c = @user_code
+ @rhs << c
+ @user_code = nil
+ end
+ end
+ end
+ end
+end
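
RuleBuilder is now where mid-rule actions get split out: each UserCode in the middle of a RHS is replaced by a generated Ident ("@n" if the action's value is referred to, "$@n" otherwise), and a nested RuleBuilder produces the derived empty rule. A rough sketch of the intended flow, with placeholder token variables:

    builder = Lrama::Grammar::RuleBuilder.new(rule_counter, midrule_action_counter)
    builder.lhs = class_token              # class:
    builder.add_rhs(keyword_class_token)   #   keyword_class
    builder.user_code = first_action       #   { $1 }       (extracted as "$@1: /* empty */ { $1 }")
    builder.add_rhs(tSTRING_token)         #   tSTRING
    builder.user_code = last_action        #   { $2 + $3 }  (stays on the main rule)
    builder.complete_input
    builder.setup_rules

    builder.rules                 # => [class: keyword_class $@1 tSTRING { $2 + $3 }]
    builder.midrule_action_rules  # => [$@1: /* empty */ { $1 }]
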
diff --git a/tool/lrama/lib/lrama/lexer.rb b/tool/lrama/lib/lrama/lexer.rb
index 870d087b38..18d702a49d 100644
--- a/tool/lrama/lib/lrama/lexer.rb
+++ b/tool/lrama/lib/lrama/lexer.rb
@@ -1,4 +1,5 @@
require "strscan"
+require "lrama/lexer/location"
require "lrama/lexer/token"
module Lrama
@@ -7,7 +8,7 @@ module Lrama
attr_accessor :status
attr_accessor :end_symbol
- SYMBOLS = %w(%{ %} %% { } \[ \] : \| ;)
+ SYMBOLS = ['%{', '%}', '%%', '{', '}', '\[', '\]', '\(', '\)', '\,', ':', '\|', ';']
PERCENT_TOKENS = %w(
%union
%token
@@ -31,8 +32,8 @@ module Lrama
def initialize(text)
@scanner = StringScanner.new(text)
- @head = @scanner.pos
- @line = 1
+ @head_column = @head = @scanner.pos
+ @head_line = @line = 1
@status = :initial
@end_symbol = nil
end
@@ -54,6 +55,13 @@ module Lrama
@scanner.pos - @head
end
+ def location
+ Location.new(
+ first_line: @head_line, first_column: @head_column,
+ last_line: @line, last_column: column
+ )
+ end
+
def lex_token
while !@scanner.eos? do
case
@@ -63,9 +71,8 @@ module Lrama
# noop
when @scanner.scan(/\/\*/)
lex_comment
- when @scanner.scan(/\/\//)
- @scanner.scan_until(/\n/)
- newline
+ when @scanner.scan(/\/\/.*(?<newline>\n)?/)
+ newline if @scanner[:newline]
else
break
end
@@ -84,17 +91,17 @@ module Lrama
when @scanner.scan(/[\?\+\*]/)
return [@scanner.matched, @scanner.matched]
when @scanner.scan(/<\w+>/)
- return [:TAG, setup_token(Lrama::Lexer::Token::Tag.new(s_value: @scanner.matched))]
+ return [:TAG, Lrama::Lexer::Token::Tag.new(s_value: @scanner.matched, location: location)]
when @scanner.scan(/'.'/)
- return [:CHARACTER, setup_token(Lrama::Lexer::Token::Char.new(s_value: @scanner.matched))]
+ return [:CHARACTER, Lrama::Lexer::Token::Char.new(s_value: @scanner.matched, location: location)]
when @scanner.scan(/'\\\\'|'\\b'|'\\t'|'\\f'|'\\r'|'\\n'|'\\v'|'\\13'/)
- return [:CHARACTER, setup_token(Lrama::Lexer::Token::Char.new(s_value: @scanner.matched))]
- when @scanner.scan(/"/)
- return [:STRING, %Q("#{@scanner.scan_until(/"/)})]
+ return [:CHARACTER, Lrama::Lexer::Token::Char.new(s_value: @scanner.matched, location: location)]
+ when @scanner.scan(/".*?"/)
+ return [:STRING, %Q(#{@scanner.matched})]
when @scanner.scan(/\d+/)
return [:INTEGER, Integer(@scanner.matched)]
when @scanner.scan(/([a-zA-Z_.][-a-zA-Z0-9_.]*)/)
- token = setup_token(Lrama::Lexer::Token::Ident.new(s_value: @scanner.matched))
+ token = Lrama::Lexer::Token::Ident.new(s_value: @scanner.matched, location: location)
type =
if @scanner.check(/\s*(\[\s*[a-zA-Z_.][-a-zA-Z0-9_.]*\s*\])?\s*:/)
:IDENT_COLON
@@ -118,25 +125,27 @@ module Lrama
when @scanner.scan(/}/)
if nested == 0 && @end_symbol == '}'
@scanner.unscan
- return [:C_DECLARATION, setup_token(Lrama::Lexer::Token::UserCode.new(s_value: code))]
+ return [:C_DECLARATION, Lrama::Lexer::Token::UserCode.new(s_value: code, location: location)]
else
code += @scanner.matched
nested -= 1
end
when @scanner.check(/#{@end_symbol}/)
- return [:C_DECLARATION, setup_token(Lrama::Lexer::Token::UserCode.new(s_value: code))]
+ return [:C_DECLARATION, Lrama::Lexer::Token::UserCode.new(s_value: code, location: location)]
when @scanner.scan(/\n/)
code += @scanner.matched
newline
- when @scanner.scan(/"/)
- matched = @scanner.scan_until(/"/)
- code += %Q("#{matched})
- @line += matched.count("\n")
- when @scanner.scan(/'/)
- matched = @scanner.scan_until(/'/)
- code += %Q('#{matched})
+ when @scanner.scan(/".*?"/)
+ code += %Q(#{@scanner.matched})
+ @line += @scanner.matched.count("\n")
+ when @scanner.scan(/'.*?'/)
+ code += %Q(#{@scanner.matched})
else
- code += @scanner.getch
+ if @scanner.scan(/[^\"'\{\}\n#{@end_symbol}]+/)
+ code += @scanner.matched
+ else
+ code += @scanner.getch
+ end
end
end
raise ParseError, "Unexpected code: #{code}."
@@ -158,13 +167,6 @@ module Lrama
end
end
- def setup_token(token)
- token.line = @head_line
- token.column = @head_column
-
- token
- end
-
def newline
@line += 1
@head = @scanner.pos + 1
diff --git a/tool/lrama/lib/lrama/lexer/location.rb b/tool/lrama/lib/lrama/lexer/location.rb
new file mode 100644
index 0000000000..d247c7d4cf
--- /dev/null
+++ b/tool/lrama/lib/lrama/lexer/location.rb
@@ -0,0 +1,22 @@
+module Lrama
+ class Lexer
+ class Location
+ attr_reader :first_line, :first_column, :last_line, :last_column
+
+ def initialize(first_line:, first_column:, last_line:, last_column:)
+ @first_line = first_line
+ @first_column = first_column
+ @last_line = last_line
+ @last_column = last_column
+ end
+
+ def ==(other)
+ self.class == other.class &&
+ self.first_line == other.first_line &&
+ self.first_column == other.first_column &&
+ self.last_line == other.last_line &&
+ self.last_column == other.last_column
+ end
+ end
+ end
+end
diff --git a/tool/lrama/lib/lrama/lexer/token.rb b/tool/lrama/lib/lrama/lexer/token.rb
index 0951a92547..3b1b1f4fe3 100644
--- a/tool/lrama/lib/lrama/lexer/token.rb
+++ b/tool/lrama/lib/lrama/lexer/token.rb
@@ -1,20 +1,38 @@
module Lrama
class Lexer
- class Token < Struct.new(:s_value, :alias_name, keyword_init: true)
+ class Token < Struct.new(:s_value, :alias_name, :location, keyword_init: true)
- attr_accessor :line, :column, :referred
+ attr_accessor :referred
def to_s
- "#{super} line: #{line}, column: #{column}"
+ "#{super} location: #{location}"
end
def referred_by?(string)
- [self.s_value, self.alias_name].include?(string)
+ [self.s_value, self.alias_name].compact.include?(string)
end
def ==(other)
self.class == other.class && self.s_value == other.s_value
end
+
+ def first_line
+ location.first_line
+ end
+ alias :line :first_line
+
+ def first_column
+ location.first_column
+ end
+ alias :column :first_column
+
+ def last_line
+ location.last_line
+ end
+
+ def last_column
+ location.last_column
+ end
end
end
end
diff --git a/tool/lrama/lib/lrama/lexer/token/parameterizing.rb b/tool/lrama/lib/lrama/lexer/token/parameterizing.rb
index b5ce6fbde3..367c62e194 100644
--- a/tool/lrama/lib/lrama/lexer/token/parameterizing.rb
+++ b/tool/lrama/lib/lrama/lexer/token/parameterizing.rb
@@ -2,16 +2,31 @@ module Lrama
class Lexer
class Token
class Parameterizing < Token
+ attr_accessor :args
+
+ def initialize(s_value: nil, alias_name: nil, location: nil, args: [])
+ super s_value: s_value, alias_name: alias_name, location: location
+ @args = args
+ end
+
def option?
- self.s_value == "?"
+ %w(option ?).include?(self.s_value)
end
def nonempty_list?
- self.s_value == "+"
+ %w(nonempty_list +).include?(self.s_value)
end
def list?
- self.s_value == "*"
+ %w(list *).include?(self.s_value)
+ end
+
+ def separated_nonempty_list?
+ %w(separated_nonempty_list).include?(self.s_value)
+ end
+
+ def separated_list?
+ %w(separated_list).include?(self.s_value)
end
end
end
diff --git a/tool/lrama/lib/lrama/lexer/token/tag.rb b/tool/lrama/lib/lrama/lexer/token/tag.rb
index de1e7b3518..e54d773915 100644
--- a/tool/lrama/lib/lrama/lexer/token/tag.rb
+++ b/tool/lrama/lib/lrama/lexer/token/tag.rb
@@ -2,6 +2,10 @@ module Lrama
class Lexer
class Token
class Tag < Token
+ # Omit "<>"
+ def member
+ s_value[1..-2] or raise "Unexpected Tag format (#{s_value})"
+ end
end
end
end
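
Tag#member strips the surrounding angle brackets, so code generation can emit `yyval.<member>`-style accesses directly:

    Lrama::Lexer::Token::Tag.new(s_value: "<val>").member # => "val"
    Lrama::Lexer::Token::Tag.new(s_value: "<i>").member   # => "i"
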
diff --git a/tool/lrama/lib/lrama/lexer/token/user_code.rb b/tool/lrama/lib/lrama/lexer/token/user_code.rb
index abd51c752f..abe6fba2b3 100644
--- a/tool/lrama/lib/lrama/lexer/token/user_code.rb
+++ b/tool/lrama/lib/lrama/lexer/token/user_code.rb
@@ -1,12 +1,62 @@
+require "strscan"
+
module Lrama
class Lexer
class Token
class UserCode < Token
- attr_accessor :references
+ def references
+ @references ||= _references
+ end
+
+ private
+
+ def _references
+ scanner = StringScanner.new(s_value)
+ references = []
+
+ while !scanner.eos? do
+ case
+ when reference = scan_reference(scanner)
+ references << reference
+ when scanner.scan(/\/\*/)
+ scanner.scan_until(/\*\//)
+ else
+ scanner.getch
+ end
+ end
+
+ references
+ end
+
+ def scan_reference(scanner)
+ start = scanner.pos
+ case
+ # $ references
+        # An identifier needs to be wrapped in brackets to use "." or "-" in it
+ when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?\$/) # $$, $<long>$
+ tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
+ return Lrama::Grammar::Reference.new(type: :dollar, name: "$", ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?(\d+)/) # $1, $2, $<long>1
+ tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
+ return Lrama::Grammar::Reference.new(type: :dollar, index: Integer(scanner[2]), ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?([a-zA-Z_][a-zA-Z0-9_]*)/) # $foo, $expr, $<long>program (named reference without brackets)
+ tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
+ return Lrama::Grammar::Reference.new(type: :dollar, name: scanner[2], ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/\$(<[a-zA-Z0-9_]+>)?\[([a-zA-Z_.][-a-zA-Z0-9_.]*)\]/) # $expr.right, $expr-right, $<long>program (named reference with brackets)
+ tag = scanner[1] ? Lrama::Lexer::Token::Tag.new(s_value: scanner[1]) : nil
+ return Lrama::Grammar::Reference.new(type: :dollar, name: scanner[2], ex_tag: tag, first_column: start, last_column: scanner.pos - 1)
- def initialize(s_value: nil, alias_name: nil)
- super
- self.references = []
+ # @ references
+      # An identifier needs to be wrapped with brackets to use "." or "-" in it
+ when scanner.scan(/@\$/) # @$
+ return Lrama::Grammar::Reference.new(type: :at, name: "$", first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/@(\d+)/) # @1
+ return Lrama::Grammar::Reference.new(type: :at, index: Integer(scanner[1]), first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/@([a-zA-Z][a-zA-Z0-9_]*)/) # @foo, @expr (named reference without brackets)
+ return Lrama::Grammar::Reference.new(type: :at, name: scanner[1], first_column: start, last_column: scanner.pos - 1)
+ when scanner.scan(/@\[([a-zA-Z_.][-a-zA-Z0-9_.]*)\]/) # @expr.right, @expr-right (named reference with brackets)
+ return Lrama::Grammar::Reference.new(type: :at, name: scanner[1], first_column: start, last_column: scanner.pos - 1)
+ end
end
end
end
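References are now derived lazily from the action text itself instead of being assigned by the lexer. A small sketch of what the scanner above would extract, assuming the classes from this diff are loaded and that Grammar::Reference exposes its members as readers (it is constructed with those keywords above):

    code = Lrama::Lexer::Token::UserCode.new(s_value: "{ $$ = $1 + $<val>right; @$ = @1; }")
    code.references.map(&:type)   #=> [:dollar, :dollar, :dollar, :at, :at]
    # C comments are skipped, so a "$1" inside /* ... */ is not reported.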
diff --git a/tool/lrama/lib/lrama/options.rb b/tool/lrama/lib/lrama/options.rb
index 007661f632..e63679bcf2 100644
--- a/tool/lrama/lib/lrama/options.rb
+++ b/tool/lrama/lib/lrama/options.rb
@@ -4,7 +4,7 @@ module Lrama
attr_accessor :skeleton, :header, :header_file,
:report_file, :outfile,
:error_recovery, :grammar_file,
- :report_file, :trace_opts, :report_opts, :y,
+ :trace_opts, :report_opts, :y,
:debug
def initialize
@@ -15,7 +15,6 @@ module Lrama
@outfile = "y.tab.c"
@error_recovery = false
@grammar_file = nil
- @report_file = nil
@trace_opts = nil
@report_opts = nil
@y = STDIN
diff --git a/tool/lrama/lib/lrama/output.rb b/tool/lrama/lib/lrama/output.rb
index 3c97ff4b16..f672085097 100644
--- a/tool/lrama/lib/lrama/output.rb
+++ b/tool/lrama/lib/lrama/output.rb
@@ -186,9 +186,9 @@ module Lrama
str = ""
@context.states.rules.each do |rule|
- next unless rule.code
+ next unless rule.token_code
- code = rule.code
+ code = rule.token_code
spaces = " " * (code.column - 1)
str << <<-STR
diff --git a/tool/lrama/lib/lrama/parser.rb b/tool/lrama/lib/lrama/parser.rb
index 3afe372516..1284d47997 100644
--- a/tool/lrama/lib/lrama/parser.rb
+++ b/tool/lrama/lib/lrama/parser.rb
@@ -658,7 +658,7 @@ end
module Lrama
class Parser < Racc::Parser
-module_eval(<<'...end parser.y/module_eval...', 'parser.y', 383)
+module_eval(<<'...end parser.y/module_eval...', 'parser.y', 418)
include Lrama::Report::Duration
@@ -666,18 +666,18 @@ def initialize(text, path, debug = false)
@text = text
@path = path
@yydebug = debug
+ @rule_counter = Lrama::Grammar::Counter.new(0)
+ @midrule_action_counter = Lrama::Grammar::Counter.new(1)
end
def parse
report_duration(:parse) do
@lexer = Lrama::Lexer.new(@text)
- @grammar = Lrama::Grammar.new
+ @grammar = Lrama::Grammar.new(@rule_counter)
@precedence_number = 0
reset_precs
do_parse
@grammar.prepare
- @grammar.compute_nullable
- @grammar.compute_first_set
@grammar.validate!
@grammar
end
@@ -688,18 +688,40 @@ def next_token
end
def on_error(error_token_id, error_value, value_stack)
- if error_value.respond_to?(:line) && error_value.respond_to?(:column)
- line = error_value.line
- first_column = error_value.column
+ if error_value.is_a?(Lrama::Lexer::Token)
+ line = error_value.first_line
+ first_column = error_value.first_column
+ last_column = error_value.last_column
+ value = "'#{error_value.s_value}'"
else
line = @lexer.line
first_column = @lexer.head_column
+ last_column = @lexer.column
+ value = error_value.inspect
end
raise ParseError, <<~ERROR
- #{@path}:#{line}:#{first_column}: parse error on value #{error_value.inspect} (#{token_to_str(error_token_id) || '?'})
+ #{@path}:#{line}:#{first_column}: parse error on value #{value} (#{token_to_str(error_token_id) || '?'})
#{@text.split("\n")[line - 1]}
- #{carrets(first_column)}
+ #{carrets(first_column, last_column)}
+ ERROR
+end
+
+def on_action_error(error_message, error_value)
+ if error_value.is_a?(Lrama::Lexer::Token)
+ line = error_value.first_line
+ first_column = error_value.first_column
+ last_column = error_value.last_column
+ else
+ line = @lexer.line
+ first_column = @lexer.head_column
+ last_column = @lexer.column
+ end
+
+ raise ParseError, <<~ERROR
+ #{@path}:#{line}: #{error_message}
+ #{@text.split("\n")[line - 1]}
+ #{carrets(first_column, last_column)}
ERROR
end
@@ -720,255 +742,266 @@ def end_c_declaration
@lexer.end_symbol = nil
end
-def carrets(first_column)
- ' ' * (first_column + 1) + '^' * (@lexer.column - first_column)
+def carrets(first_column, last_column)
+ ' ' * (first_column + 1) + '^' * (last_column - first_column)
end
...end parser.y/module_eval...
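With both columns now taken from the error token, the caret underline spans exactly the offending token. The helper copied verbatim from the hunk above, plus an illustrative call (the column values are made up):

    def carrets(first_column, last_column)
      ' ' * (first_column + 1) + '^' * (last_column - first_column)
    end

    carrets(4, 9)   #=> "     ^^^^^"   (5 spaces, then 5 carets)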
##### State transition tables begin ###
racc_action_table = [
- 84, 137, 85, 3, 6, 43, 7, 42, 39, 67,
- 43, 8, 42, 136, 67, 43, 43, 42, 42, 33,
- 58, 142, 43, 43, 42, 42, 142, 21, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 86, 139, 140,
- 141, 143, 39, 139, 140, 141, 143, 79, 43, 43,
- 42, 42, 67, 63, 70, 43, 43, 42, 42, 40,
- 70, 21, 23, 24, 25, 26, 27, 28, 29, 30,
- 31, 9, 45, 47, 14, 12, 13, 15, 16, 17,
- 18, 47, 47, 19, 20, 21, 23, 24, 25, 26,
- 27, 28, 29, 30, 31, 43, 43, 42, 42, 50,
- 70, 70, 43, 43, 42, 42, 67, 161, 43, 43,
- 42, 42, 67, 161, 43, 43, 42, 42, 67, 161,
- 43, 43, 42, 42, 67, 161, 43, 43, 42, 42,
- 67, 161, 43, 43, 42, 42, 67, 161, 43, 43,
- 42, 42, 67, 67, 43, 43, 42, 42, 67, 67,
- 43, 43, 42, 42, 67, 67, 43, 43, 42, 42,
- 43, 43, 42, 42, 51, 52, 53, 54, 55, 76,
- 80, 82, 87, 87, 87, 89, 95, 99, 100, 108,
- 109, 111, 113, 114, 115, 116, 117, 120, 122, 123,
- 126, 127, 128, 130, 145, 147, 148, 149, 150, 151,
- 126, 82, 156, 157, 164, 167, 82 ]
+ 84, 43, 85, 139, 39, 67, 43, 138, 139, 153,
+ 67, 171, 172, 154, 155, 156, 43, 141, 42, 137,
+ 3, 58, 141, 21, 23, 24, 25, 26, 27, 28,
+ 29, 30, 31, 39, 6, 8, 7, 142, 79, 43,
+ 86, 42, 142, 67, 63, 43, 43, 42, 42, 33,
+ 70, 70, 21, 23, 24, 25, 26, 27, 28, 29,
+ 30, 31, 9, 40, 45, 14, 12, 13, 15, 16,
+ 17, 18, 47, 47, 19, 20, 21, 23, 24, 25,
+ 26, 27, 28, 29, 30, 31, 43, 43, 42, 42,
+ 47, 70, 70, 43, 43, 42, 42, 67, 165, 43,
+ 43, 42, 42, 67, 165, 43, 43, 42, 42, 67,
+ 165, 43, 43, 42, 42, 67, 165, 43, 43, 42,
+ 42, 67, 165, 43, 43, 42, 42, 67, 165, 43,
+ 43, 42, 42, 67, 67, 43, 43, 42, 42, 67,
+ 67, 43, 43, 42, 42, 67, 67, 43, 43, 42,
+ 42, 67, 67, 43, 43, 42, 42, 43, 43, 42,
+ 42, 43, 50, 42, 51, 52, 53, 54, 55, 76,
+ 80, 82, 87, 87, 87, 89, 95, 99, 100, 103,
+ 103, 103, 103, 108, 109, 111, 113, 114, 115, 116,
+ 117, 120, 123, 124, 127, 128, 129, 131, 144, 146,
+ 147, 148, 149, 150, 127, 82, 160, 161, 169, 175,
+ 176, 82 ]
racc_action_check = [
- 41, 124, 41, 1, 2, 125, 2, 125, 9, 125,
- 152, 3, 152, 124, 152, 26, 13, 26, 13, 7,
- 26, 125, 57, 58, 57, 58, 152, 9, 9, 9,
- 9, 9, 9, 9, 9, 9, 9, 41, 125, 125,
- 125, 125, 34, 152, 152, 152, 152, 34, 27, 28,
- 27, 28, 27, 27, 28, 29, 68, 29, 68, 12,
- 29, 34, 34, 34, 34, 34, 34, 34, 34, 34,
- 34, 4, 14, 15, 4, 4, 4, 4, 4, 4,
- 4, 16, 17, 4, 4, 4, 4, 4, 4, 4,
- 4, 4, 4, 4, 4, 30, 31, 30, 31, 18,
- 30, 31, 149, 69, 149, 69, 149, 149, 150, 70,
- 150, 70, 150, 150, 151, 73, 151, 73, 151, 151,
- 158, 74, 158, 74, 158, 158, 162, 75, 162, 75,
- 162, 162, 163, 95, 163, 95, 163, 163, 62, 63,
- 62, 63, 62, 63, 100, 102, 100, 102, 100, 102,
- 121, 143, 121, 143, 121, 143, 97, 103, 97, 103,
- 105, 118, 105, 118, 19, 21, 23, 24, 25, 32,
- 37, 38, 46, 48, 49, 50, 56, 60, 61, 81,
- 82, 88, 90, 91, 92, 93, 94, 98, 106, 107,
- 108, 109, 110, 112, 129, 131, 132, 133, 134, 135,
- 136, 138, 144, 146, 154, 166, 167 ]
+ 41, 126, 41, 126, 9, 126, 151, 125, 151, 139,
+ 151, 168, 168, 139, 139, 139, 26, 126, 26, 125,
+ 1, 26, 151, 9, 9, 9, 9, 9, 9, 9,
+ 9, 9, 9, 34, 2, 3, 2, 126, 34, 27,
+ 41, 27, 151, 27, 27, 28, 29, 28, 29, 7,
+ 28, 29, 34, 34, 34, 34, 34, 34, 34, 34,
+ 34, 34, 4, 12, 14, 4, 4, 4, 4, 4,
+ 4, 4, 15, 16, 4, 4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 4, 4, 30, 31, 30, 31,
+ 17, 30, 31, 148, 13, 148, 13, 148, 148, 149,
+ 57, 149, 57, 149, 149, 150, 58, 150, 58, 150,
+ 150, 162, 69, 162, 69, 162, 162, 166, 70, 166,
+ 70, 166, 166, 167, 95, 167, 95, 167, 167, 62,
+ 63, 62, 63, 62, 63, 100, 102, 100, 102, 100,
+ 102, 121, 142, 121, 142, 121, 142, 153, 172, 153,
+ 172, 153, 172, 97, 103, 97, 103, 105, 118, 105,
+ 118, 122, 18, 122, 19, 21, 23, 24, 25, 32,
+ 37, 38, 46, 48, 49, 50, 56, 60, 61, 68,
+ 73, 74, 75, 81, 82, 88, 90, 91, 92, 93,
+ 94, 98, 106, 107, 108, 109, 110, 112, 130, 132,
+ 133, 134, 135, 136, 137, 140, 143, 145, 158, 173,
+ 174, 175 ]
racc_action_pointer = [
- nil, 3, -6, 11, 62, nil, nil, 12, nil, 4,
- nil, nil, 53, 13, 65, 54, 62, 63, 94, 145,
- nil, 146, nil, 147, 148, 149, 12, 45, 46, 52,
- 92, 93, 167, nil, 38, nil, nil, 148, 131, nil,
+ nil, 20, 24, 35, 53, nil, nil, 42, nil, 0,
+ nil, nil, 57, 91, 57, 53, 54, 71, 157, 145,
+ nil, 146, nil, 147, 148, 149, 13, 36, 42, 43,
+ 83, 84, 167, nil, 29, nil, nil, 148, 128, nil,
nil, -5, nil, nil, nil, nil, 153, nil, 154, 155,
- 156, nil, nil, nil, nil, nil, 168, 19, 20, nil,
- 171, 170, 135, 136, nil, nil, nil, nil, 53, 100,
- 106, nil, nil, 112, 118, 124, nil, nil, nil, nil,
- nil, 146, 175, nil, nil, nil, nil, nil, 179, nil,
- 180, 181, 182, 183, 184, 130, nil, 153, 180, nil,
- 141, nil, 142, 154, nil, 157, 177, 187, 155, 150,
- 190, nil, 191, nil, nil, nil, nil, nil, 158, nil,
- nil, 147, nil, nil, -21, 2, nil, nil, nil, 174,
- nil, 175, 176, 177, 178, 179, 165, nil, 161, nil,
- nil, nil, nil, 148, 182, nil, 183, nil, nil, 99,
- 105, 111, 7, nil, 202, nil, nil, nil, 117, nil,
- nil, nil, 123, 129, nil, nil, 185, 166, nil ]
+ 156, nil, nil, nil, nil, nil, 168, 97, 103, nil,
+ 171, 170, 126, 127, nil, nil, nil, nil, 171, 109,
+ 115, nil, nil, 172, 173, 174, nil, nil, nil, nil,
+ nil, 150, 179, nil, nil, nil, nil, nil, 183, nil,
+ 184, 185, 186, 187, 188, 121, nil, 150, 184, nil,
+ 132, nil, 133, 151, nil, 154, 181, 191, 159, 151,
+ 194, nil, 195, nil, nil, nil, nil, nil, 155, nil,
+ nil, 138, 158, nil, nil, -15, -2, nil, nil, nil,
+ 178, nil, 179, 180, 181, 182, 183, 169, nil, -27,
+ 162, nil, 139, 186, nil, 187, nil, nil, 90, 96,
+ 102, 3, nil, 144, nil, nil, nil, nil, 206, nil,
+ nil, nil, 108, nil, nil, nil, 114, 120, -26, nil,
+ nil, nil, 145, 189, 173, 168, nil, nil ]
racc_action_default = [
- -2, -108, -8, -108, -108, -3, -4, -108, 169, -108,
- -9, -10, -108, -108, -108, -108, -108, -108, -108, -108,
- -23, -108, -27, -108, -108, -108, -108, -108, -108, -108,
- -108, -108, -108, -7, -95, -74, -76, -108, -92, -94,
- -11, -99, -72, -73, -98, -13, -14, -63, -15, -16,
- -108, -20, -24, -28, -31, -34, -37, -43, -108, -46,
- -49, -38, -53, -108, -56, -58, -59, -107, -39, -66,
- -108, -69, -71, -40, -41, -42, -5, -1, -75, -96,
- -77, -108, -108, -12, -100, -101, -102, -60, -108, -17,
- -108, -108, -108, -108, -108, -108, -47, -44, -51, -50,
- -108, -57, -54, -68, -70, -67, -108, -108, -82, -108,
- -108, -64, -108, -21, -25, -29, -32, -35, -45, -48,
- -52, -55, -6, -97, -78, -79, -83, -93, -61, -108,
- -18, -108, -108, -108, -108, -108, -82, -81, -92, -85,
- -86, -87, -88, -108, -108, -65, -108, -22, -26, -108,
- -108, -108, -80, -84, -108, -91, -62, -19, -30, -103,
- -105, -106, -33, -36, -89, -104, -108, -92, -90 ]
+ -2, -111, -8, -111, -111, -3, -4, -111, 178, -111,
+ -9, -10, -111, -111, -111, -111, -111, -111, -111, -111,
+ -23, -111, -27, -111, -111, -111, -111, -111, -111, -111,
+ -111, -111, -111, -7, -98, -74, -76, -111, -95, -97,
+ -11, -102, -72, -73, -101, -13, -14, -63, -15, -16,
+ -111, -20, -24, -28, -31, -34, -37, -43, -111, -46,
+ -49, -38, -53, -111, -56, -58, -59, -110, -39, -66,
+ -111, -69, -71, -40, -41, -42, -5, -1, -75, -99,
+ -77, -111, -111, -12, -103, -104, -105, -60, -111, -17,
+ -111, -111, -111, -111, -111, -111, -47, -44, -51, -50,
+ -111, -57, -54, -111, -70, -67, -111, -111, -82, -111,
+ -111, -64, -111, -21, -25, -29, -32, -35, -45, -48,
+ -52, -55, -68, -6, -100, -78, -79, -83, -96, -61,
+ -111, -18, -111, -111, -111, -111, -111, -82, -81, -72,
+ -95, -88, -111, -111, -65, -111, -22, -26, -111, -111,
+ -111, -80, -85, -111, -92, -93, -94, -84, -111, -91,
+ -62, -19, -30, -106, -108, -109, -33, -36, -111, -89,
+ -107, -86, -111, -111, -111, -95, -87, -90 ]
racc_goto_table = [
- 81, 64, 44, 57, 62, 96, 125, 104, 35, 165,
- 46, 48, 49, 165, 165, 60, 1, 72, 72, 72,
- 72, 103, 2, 105, 4, 34, 103, 103, 103, 68,
- 73, 74, 75, 78, 152, 97, 101, 64, 77, 5,
- 102, 104, 32, 104, 106, 96, 60, 60, 158, 162,
- 163, 10, 11, 41, 83, 112, 146, 72, 72, 72,
- 90, 131, 72, 72, 72, 91, 96, 132, 92, 133,
- 93, 134, 118, 94, 64, 135, 101, 121, 56, 61,
- 98, 119, 110, 144, 60, 88, 60, 129, 124, 154,
- 166, 107, 72, nil, 72, 101, nil, nil, nil, 138,
- 153, nil, nil, nil, nil, nil, nil, 60, nil, nil,
- nil, nil, nil, nil, nil, nil, nil, 155, nil, nil,
- nil, nil, nil, nil, nil, nil, 138, nil, nil, 168 ]
+ 64, 81, 57, 44, 62, 96, 105, 35, 104, 126,
+ 170, 46, 48, 49, 170, 170, 60, 1, 72, 72,
+ 72, 72, 68, 73, 74, 75, 162, 166, 167, 2,
+ 4, 34, 78, 77, 97, 101, 64, 5, 151, 122,
+ 102, 32, 106, 10, 104, 96, 11, 60, 60, 41,
+ 83, 112, 145, 90, 132, 91, 133, 92, 134, 72,
+ 72, 104, 93, 135, 94, 136, 96, 56, 61, 98,
+ 119, 118, 110, 64, 143, 101, 88, 121, 130, 125,
+ 152, 158, 173, 107, nil, 60, nil, 60, nil, nil,
+ nil, nil, nil, 72, 101, 72, nil, nil, nil, 140,
+ nil, nil, nil, 157, nil, nil, nil, nil, 60, nil,
+ nil, nil, 72, nil, nil, 159, nil, nil, nil, nil,
+ nil, nil, nil, nil, 140, nil, 168, nil, nil, nil,
+ nil, nil, nil, nil, nil, nil, nil, nil, 177, nil,
+ nil, nil, nil, nil, nil, 174 ]
racc_goto_check = [
- 48, 37, 33, 31, 36, 32, 50, 44, 45, 54,
- 13, 13, 13, 54, 54, 33, 1, 33, 33, 33,
- 33, 43, 2, 43, 3, 4, 43, 43, 43, 30,
- 30, 30, 30, 45, 50, 31, 37, 37, 5, 6,
- 36, 44, 7, 44, 8, 32, 33, 33, 19, 19,
- 19, 9, 10, 11, 12, 14, 15, 33, 33, 33,
- 16, 17, 33, 33, 33, 20, 32, 21, 22, 23,
- 24, 25, 31, 26, 37, 27, 37, 36, 28, 29,
- 34, 35, 39, 40, 33, 41, 33, 42, 49, 51,
- 52, 53, 33, nil, 33, 37, nil, nil, nil, 37,
- 48, nil, nil, nil, nil, nil, nil, 33, nil, nil,
- nil, nil, nil, nil, nil, nil, nil, 37, nil, nil,
- nil, nil, nil, nil, nil, nil, 37, nil, nil, 48 ]
+ 37, 48, 31, 33, 36, 32, 43, 45, 44, 50,
+ 55, 13, 13, 13, 55, 55, 33, 1, 33, 33,
+ 33, 33, 30, 30, 30, 30, 19, 19, 19, 2,
+ 3, 4, 45, 5, 31, 37, 37, 6, 50, 43,
+ 36, 7, 8, 9, 44, 32, 10, 33, 33, 11,
+ 12, 14, 15, 16, 17, 20, 21, 22, 23, 33,
+ 33, 44, 24, 25, 26, 27, 32, 28, 29, 34,
+ 35, 31, 39, 37, 40, 37, 41, 36, 42, 49,
+ 51, 52, 53, 54, nil, 33, nil, 33, nil, nil,
+ nil, nil, nil, 33, 37, 33, nil, nil, nil, 37,
+ nil, nil, nil, 48, nil, nil, nil, nil, 33, nil,
+ nil, nil, 33, nil, nil, 37, nil, nil, nil, nil,
+ nil, nil, nil, nil, 37, nil, 37, nil, nil, nil,
+ nil, nil, nil, nil, nil, nil, nil, nil, 48, nil,
+ nil, nil, nil, nil, nil, 37 ]
racc_goto_pointer = [
- nil, 16, 22, 22, 16, 4, 37, 36, -32, 47,
- 48, 40, 13, -5, -34, -74, 9, -52, nil, -101,
- 13, -47, 15, -46, 16, -45, 18, -42, 52, 52,
- 1, -23, -52, -11, 20, -17, -23, -26, nil, -5,
- -45, 38, -24, -47, -62, -1, nil, nil, -38, -20,
- -102, -53, -74, 12, -149 ]
+ nil, 17, 29, 28, 22, -1, 35, 35, -34, 39,
+ 42, 36, 9, -4, -38, -79, 2, -59, nil, -122,
+ 3, -58, 4, -57, 8, -53, 9, -52, 41, 41,
+ -6, -24, -52, -10, 9, -28, -23, -27, nil, -15,
+ -55, 29, -33, -64, -61, -2, nil, nil, -37, -29,
+ -99, -59, -60, -87, 4, -152 ]
racc_goto_default = [
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
37, nil, nil, nil, nil, nil, nil, nil, 22, nil,
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil,
- nil, nil, 59, 65, nil, nil, nil, 160, 66, nil,
+ nil, nil, 59, 65, nil, nil, nil, 164, 66, nil,
nil, nil, nil, 69, 71, nil, 36, 38, nil, nil,
- nil, nil, nil, nil, 159 ]
+ nil, nil, nil, nil, nil, 163 ]
racc_reduce_table = [
0, 0, :racc_error,
- 5, 44, :_reduce_none,
- 0, 45, :_reduce_none,
- 2, 45, :_reduce_none,
- 0, 50, :_reduce_4,
- 0, 51, :_reduce_5,
- 5, 49, :_reduce_6,
- 2, 49, :_reduce_none,
- 0, 46, :_reduce_8,
- 2, 46, :_reduce_none,
- 1, 52, :_reduce_none,
- 2, 52, :_reduce_11,
- 3, 52, :_reduce_none,
- 2, 52, :_reduce_none,
- 2, 52, :_reduce_none,
- 2, 52, :_reduce_15,
- 2, 52, :_reduce_16,
- 0, 57, :_reduce_17,
- 0, 58, :_reduce_18,
- 7, 52, :_reduce_19,
- 0, 59, :_reduce_20,
- 0, 60, :_reduce_21,
- 6, 52, :_reduce_22,
- 1, 52, :_reduce_none,
- 0, 63, :_reduce_24,
- 0, 64, :_reduce_25,
- 6, 53, :_reduce_26,
- 1, 53, :_reduce_none,
- 0, 65, :_reduce_28,
- 0, 66, :_reduce_29,
- 7, 53, :_reduce_none,
- 0, 67, :_reduce_31,
- 0, 68, :_reduce_32,
- 7, 53, :_reduce_33,
- 0, 69, :_reduce_34,
- 0, 70, :_reduce_35,
- 7, 53, :_reduce_36,
- 2, 61, :_reduce_none,
- 2, 61, :_reduce_38,
- 2, 61, :_reduce_39,
- 2, 61, :_reduce_40,
- 2, 61, :_reduce_41,
- 2, 61, :_reduce_42,
- 1, 71, :_reduce_43,
- 2, 71, :_reduce_44,
- 3, 71, :_reduce_45,
- 1, 74, :_reduce_46,
- 2, 74, :_reduce_47,
- 3, 75, :_reduce_48,
- 0, 77, :_reduce_none,
- 1, 77, :_reduce_none,
- 0, 78, :_reduce_none,
- 1, 78, :_reduce_none,
- 1, 72, :_reduce_53,
- 2, 72, :_reduce_54,
- 3, 72, :_reduce_55,
- 1, 79, :_reduce_56,
- 2, 79, :_reduce_57,
- 1, 80, :_reduce_none,
- 1, 80, :_reduce_none,
- 0, 82, :_reduce_60,
- 0, 83, :_reduce_61,
- 6, 56, :_reduce_62,
- 0, 84, :_reduce_63,
- 0, 85, :_reduce_64,
- 5, 56, :_reduce_65,
- 1, 73, :_reduce_66,
- 2, 73, :_reduce_67,
- 2, 73, :_reduce_68,
- 1, 86, :_reduce_69,
- 2, 86, :_reduce_70,
- 1, 87, :_reduce_none,
- 1, 76, :_reduce_72,
- 1, 76, :_reduce_73,
- 1, 47, :_reduce_none,
- 2, 47, :_reduce_none,
- 1, 88, :_reduce_none,
- 2, 88, :_reduce_none,
- 4, 89, :_reduce_78,
- 1, 92, :_reduce_79,
- 3, 92, :_reduce_80,
- 2, 92, :_reduce_none,
- 0, 93, :_reduce_82,
- 1, 93, :_reduce_83,
- 3, 93, :_reduce_84,
- 2, 93, :_reduce_85,
- 2, 93, :_reduce_86,
- 2, 93, :_reduce_87,
- 0, 94, :_reduce_88,
- 0, 95, :_reduce_89,
- 7, 93, :_reduce_90,
- 3, 93, :_reduce_91,
- 0, 91, :_reduce_none,
- 3, 91, :_reduce_93,
- 1, 90, :_reduce_none,
+ 5, 47, :_reduce_none,
0, 48, :_reduce_none,
- 0, 96, :_reduce_96,
- 3, 48, :_reduce_97,
- 1, 54, :_reduce_none,
- 0, 55, :_reduce_none,
- 1, 55, :_reduce_none,
+ 2, 48, :_reduce_none,
+ 0, 53, :_reduce_4,
+ 0, 54, :_reduce_5,
+ 5, 52, :_reduce_6,
+ 2, 52, :_reduce_none,
+ 0, 49, :_reduce_8,
+ 2, 49, :_reduce_none,
1, 55, :_reduce_none,
+ 2, 55, :_reduce_11,
+ 3, 55, :_reduce_none,
+ 2, 55, :_reduce_none,
+ 2, 55, :_reduce_none,
+ 2, 55, :_reduce_15,
+ 2, 55, :_reduce_16,
+ 0, 60, :_reduce_17,
+ 0, 61, :_reduce_18,
+ 7, 55, :_reduce_19,
+ 0, 62, :_reduce_20,
+ 0, 63, :_reduce_21,
+ 6, 55, :_reduce_22,
1, 55, :_reduce_none,
- 1, 62, :_reduce_103,
- 2, 62, :_reduce_104,
+ 0, 66, :_reduce_24,
+ 0, 67, :_reduce_25,
+ 6, 56, :_reduce_26,
+ 1, 56, :_reduce_none,
+ 0, 68, :_reduce_28,
+ 0, 69, :_reduce_29,
+ 7, 56, :_reduce_none,
+ 0, 70, :_reduce_31,
+ 0, 71, :_reduce_32,
+ 7, 56, :_reduce_33,
+ 0, 72, :_reduce_34,
+ 0, 73, :_reduce_35,
+ 7, 56, :_reduce_36,
+ 2, 64, :_reduce_none,
+ 2, 64, :_reduce_38,
+ 2, 64, :_reduce_39,
+ 2, 64, :_reduce_40,
+ 2, 64, :_reduce_41,
+ 2, 64, :_reduce_42,
+ 1, 74, :_reduce_43,
+ 2, 74, :_reduce_44,
+ 3, 74, :_reduce_45,
+ 1, 77, :_reduce_46,
+ 2, 77, :_reduce_47,
+ 3, 78, :_reduce_48,
+ 0, 80, :_reduce_none,
+ 1, 80, :_reduce_none,
+ 0, 81, :_reduce_none,
+ 1, 81, :_reduce_none,
+ 1, 75, :_reduce_53,
+ 2, 75, :_reduce_54,
+ 3, 75, :_reduce_55,
+ 1, 82, :_reduce_56,
+ 2, 82, :_reduce_57,
+ 1, 83, :_reduce_none,
+ 1, 83, :_reduce_none,
+ 0, 85, :_reduce_60,
+ 0, 86, :_reduce_61,
+ 6, 59, :_reduce_62,
+ 0, 87, :_reduce_63,
+ 0, 88, :_reduce_64,
+ 5, 59, :_reduce_65,
+ 1, 76, :_reduce_66,
+ 2, 76, :_reduce_67,
+ 3, 76, :_reduce_68,
+ 1, 89, :_reduce_69,
+ 2, 89, :_reduce_70,
+ 1, 90, :_reduce_none,
+ 1, 79, :_reduce_72,
+ 1, 79, :_reduce_73,
+ 1, 50, :_reduce_none,
+ 2, 50, :_reduce_none,
+ 1, 91, :_reduce_none,
+ 2, 91, :_reduce_none,
+ 4, 92, :_reduce_78,
+ 1, 95, :_reduce_79,
+ 3, 95, :_reduce_80,
+ 2, 95, :_reduce_none,
+ 0, 96, :_reduce_82,
+ 1, 96, :_reduce_83,
+ 3, 96, :_reduce_84,
+ 3, 96, :_reduce_85,
+ 5, 96, :_reduce_86,
+ 7, 96, :_reduce_87,
+ 0, 98, :_reduce_88,
+ 0, 99, :_reduce_89,
+ 7, 96, :_reduce_90,
+ 3, 96, :_reduce_91,
1, 97, :_reduce_none,
1, 97, :_reduce_none,
- 1, 81, :_reduce_107 ]
-
-racc_reduce_n = 108
-
-racc_shift_n = 169
+ 1, 97, :_reduce_none,
+ 0, 94, :_reduce_none,
+ 3, 94, :_reduce_96,
+ 1, 93, :_reduce_none,
+ 0, 51, :_reduce_none,
+ 0, 100, :_reduce_99,
+ 3, 51, :_reduce_100,
+ 1, 57, :_reduce_none,
+ 0, 58, :_reduce_none,
+ 1, 58, :_reduce_none,
+ 1, 58, :_reduce_none,
+ 1, 58, :_reduce_none,
+ 1, 65, :_reduce_106,
+ 2, 65, :_reduce_107,
+ 1, 101, :_reduce_none,
+ 1, 101, :_reduce_none,
+ 1, 84, :_reduce_110 ]
+
+racc_reduce_n = 111
+
+racc_shift_n = 178
racc_token_table = {
false => 0,
@@ -1007,15 +1040,18 @@ racc_token_table = {
":" => 33,
"|" => 34,
"%empty" => 35,
- "?" => 36,
- "+" => 37,
- "*" => 38,
+ "(" => 36,
+ ")" => 37,
+ "," => 38,
"%prec" => 39,
- "[" => 40,
- "]" => 41,
- "{...}" => 42 }
+ "?" => 40,
+ "+" => 41,
+ "*" => 42,
+ "[" => 43,
+ "]" => 44,
+ "{...}" => 45 }
-racc_nt_base = 43
+racc_nt_base = 46
racc_use_result_var = true
@@ -1073,10 +1109,13 @@ Racc_token_to_s_table = [
"\":\"",
"\"|\"",
"\"%empty\"",
+ "\"(\"",
+ "\")\"",
+ "\",\"",
+ "\"%prec\"",
"\"?\"",
"\"+\"",
"\"*\"",
- "\"%prec\"",
"\"[\"",
"\"]\"",
"\"{...}\"",
@@ -1131,6 +1170,7 @@ Racc_token_to_s_table = [
"named_ref_opt",
"rhs_list",
"rhs",
+ "parameterizing_suffix",
"@19",
"@20",
"@21",
@@ -1203,26 +1243,24 @@ module_eval(<<'.,.,', 'parser.y', 31)
module_eval(<<'.,.,', 'parser.y', 37)
def _reduce_15(val, _values, result)
val[1].each {|token|
- token.references = []
- @grammar.lex_param = @grammar.build_code(:lex_param, token).token_code.s_value
+ @grammar.lex_param = Grammar::Code::NoReferenceCode.new(type: :lex_param, token_code: token).token_code.s_value
}
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 44)
+module_eval(<<'.,.,', 'parser.y', 43)
def _reduce_16(val, _values, result)
val[1].each {|token|
- token.references = []
- @grammar.parse_param = @grammar.build_code(:parse_param, token).token_code.s_value
+ @grammar.parse_param = Grammar::Code::NoReferenceCode.new(type: :parse_param, token_code: token).token_code.s_value
}
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 51)
+module_eval(<<'.,.,', 'parser.y', 49)
def _reduce_17(val, _values, result)
begin_c_declaration("}")
@@ -1230,7 +1268,7 @@ module_eval(<<'.,.,', 'parser.y', 51)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 55)
+module_eval(<<'.,.,', 'parser.y', 53)
def _reduce_18(val, _values, result)
end_c_declaration
@@ -1238,7 +1276,7 @@ module_eval(<<'.,.,', 'parser.y', 55)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 59)
+module_eval(<<'.,.,', 'parser.y', 57)
def _reduce_19(val, _values, result)
@grammar.add_percent_code(id: val[1], code: val[4])
@@ -1246,7 +1284,7 @@ module_eval(<<'.,.,', 'parser.y', 59)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 63)
+module_eval(<<'.,.,', 'parser.y', 61)
def _reduce_20(val, _values, result)
begin_c_declaration("}")
@@ -1254,7 +1292,7 @@ module_eval(<<'.,.,', 'parser.y', 63)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 67)
+module_eval(<<'.,.,', 'parser.y', 65)
def _reduce_21(val, _values, result)
end_c_declaration
@@ -1262,9 +1300,9 @@ module_eval(<<'.,.,', 'parser.y', 67)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 71)
+module_eval(<<'.,.,', 'parser.y', 69)
def _reduce_22(val, _values, result)
- @grammar.initial_action = @grammar.build_code(:initial_action, val[3])
+ @grammar.initial_action = Grammar::Code::InitialActionCode.new(type: :initial_action, token_code: val[3])
result
end
@@ -1272,7 +1310,7 @@ module_eval(<<'.,.,', 'parser.y', 71)
# reduce 23 omitted
-module_eval(<<'.,.,', 'parser.y', 77)
+module_eval(<<'.,.,', 'parser.y', 75)
def _reduce_24(val, _values, result)
begin_c_declaration("}")
@@ -1280,7 +1318,7 @@ module_eval(<<'.,.,', 'parser.y', 77)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 81)
+module_eval(<<'.,.,', 'parser.y', 79)
def _reduce_25(val, _values, result)
end_c_declaration
@@ -1288,9 +1326,12 @@ module_eval(<<'.,.,', 'parser.y', 81)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 85)
+module_eval(<<'.,.,', 'parser.y', 83)
def _reduce_26(val, _values, result)
- @grammar.set_union(@grammar.build_code(:union, val[3]), val[3].line)
+ @grammar.set_union(
+ Grammar::Code::NoReferenceCode.new(type: :union, token_code: val[3]),
+ val[3].line
+ )
result
end
@@ -1298,7 +1339,7 @@ module_eval(<<'.,.,', 'parser.y', 85)
# reduce 27 omitted
-module_eval(<<'.,.,', 'parser.y', 90)
+module_eval(<<'.,.,', 'parser.y', 91)
def _reduce_28(val, _values, result)
begin_c_declaration("}")
@@ -1306,7 +1347,7 @@ module_eval(<<'.,.,', 'parser.y', 90)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 94)
+module_eval(<<'.,.,', 'parser.y', 95)
def _reduce_29(val, _values, result)
end_c_declaration
@@ -1316,7 +1357,7 @@ module_eval(<<'.,.,', 'parser.y', 94)
# reduce 30 omitted
-module_eval(<<'.,.,', 'parser.y', 99)
+module_eval(<<'.,.,', 'parser.y', 100)
def _reduce_31(val, _values, result)
begin_c_declaration("}")
@@ -1324,7 +1365,7 @@ module_eval(<<'.,.,', 'parser.y', 99)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 103)
+module_eval(<<'.,.,', 'parser.y', 104)
def _reduce_32(val, _values, result)
end_c_declaration
@@ -1332,15 +1373,19 @@ module_eval(<<'.,.,', 'parser.y', 103)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 107)
+module_eval(<<'.,.,', 'parser.y', 108)
def _reduce_33(val, _values, result)
- @grammar.add_printer(ident_or_tags: val[6], code: @grammar.build_code(:printer, val[3]), lineno: val[3].line)
+ @grammar.add_printer(
+ ident_or_tags: val[6],
+ token_code: val[3],
+ lineno: val[3].line
+ )
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 111)
+module_eval(<<'.,.,', 'parser.y', 116)
def _reduce_34(val, _values, result)
begin_c_declaration("}")
@@ -1348,7 +1393,7 @@ module_eval(<<'.,.,', 'parser.y', 111)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 115)
+module_eval(<<'.,.,', 'parser.y', 120)
def _reduce_35(val, _values, result)
end_c_declaration
@@ -1356,9 +1401,13 @@ module_eval(<<'.,.,', 'parser.y', 115)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 119)
+module_eval(<<'.,.,', 'parser.y', 124)
def _reduce_36(val, _values, result)
- @grammar.add_error_token(ident_or_tags: val[6], code: @grammar.build_code(:error_token, val[3]), lineno: val[3].line)
+ @grammar.add_error_token(
+ ident_or_tags: val[6],
+ token_code: val[3],
+ lineno: val[3].line
+ )
result
end
@@ -1366,7 +1415,7 @@ module_eval(<<'.,.,', 'parser.y', 119)
# reduce 37 omitted
-module_eval(<<'.,.,', 'parser.y', 125)
+module_eval(<<'.,.,', 'parser.y', 134)
def _reduce_38(val, _values, result)
val[1].each {|hash|
hash[:tokens].each {|id|
@@ -1378,7 +1427,7 @@ module_eval(<<'.,.,', 'parser.y', 125)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 133)
+module_eval(<<'.,.,', 'parser.y', 142)
def _reduce_39(val, _values, result)
val[1].each {|hash|
hash[:tokens].each {|id|
@@ -1392,7 +1441,7 @@ module_eval(<<'.,.,', 'parser.y', 133)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 143)
+module_eval(<<'.,.,', 'parser.y', 152)
def _reduce_40(val, _values, result)
val[1].each {|hash|
hash[:tokens].each {|id|
@@ -1406,7 +1455,7 @@ module_eval(<<'.,.,', 'parser.y', 143)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 153)
+module_eval(<<'.,.,', 'parser.y', 162)
def _reduce_41(val, _values, result)
val[1].each {|hash|
hash[:tokens].each {|id|
@@ -1420,7 +1469,7 @@ module_eval(<<'.,.,', 'parser.y', 153)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 163)
+module_eval(<<'.,.,', 'parser.y', 172)
def _reduce_42(val, _values, result)
val[1].each {|hash|
hash[:tokens].each {|id|
@@ -1434,7 +1483,7 @@ module_eval(<<'.,.,', 'parser.y', 163)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 174)
+module_eval(<<'.,.,', 'parser.y', 183)
def _reduce_43(val, _values, result)
val[0].each {|token_declaration|
@grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: nil, replace: true)
@@ -1444,7 +1493,7 @@ module_eval(<<'.,.,', 'parser.y', 174)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 180)
+module_eval(<<'.,.,', 'parser.y', 189)
def _reduce_44(val, _values, result)
val[1].each {|token_declaration|
@grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: val[0], replace: true)
@@ -1454,7 +1503,7 @@ module_eval(<<'.,.,', 'parser.y', 180)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 186)
+module_eval(<<'.,.,', 'parser.y', 195)
def _reduce_45(val, _values, result)
val[2].each {|token_declaration|
@grammar.add_term(id: token_declaration[0], alias_name: token_declaration[2], token_id: token_declaration[1], tag: val[1], replace: true)
@@ -1464,21 +1513,21 @@ module_eval(<<'.,.,', 'parser.y', 186)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 191)
+module_eval(<<'.,.,', 'parser.y', 200)
def _reduce_46(val, _values, result)
result = [val[0]]
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 192)
+module_eval(<<'.,.,', 'parser.y', 201)
def _reduce_47(val, _values, result)
result = val[0].append(val[1])
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 194)
+module_eval(<<'.,.,', 'parser.y', 203)
def _reduce_48(val, _values, result)
result = val
result
@@ -1493,7 +1542,7 @@ module_eval(<<'.,.,', 'parser.y', 194)
# reduce 52 omitted
-module_eval(<<'.,.,', 'parser.y', 204)
+module_eval(<<'.,.,', 'parser.y', 213)
def _reduce_53(val, _values, result)
result = [{tag: nil, tokens: val[0]}]
@@ -1501,7 +1550,7 @@ module_eval(<<'.,.,', 'parser.y', 204)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 208)
+module_eval(<<'.,.,', 'parser.y', 217)
def _reduce_54(val, _values, result)
result = [{tag: val[0], tokens: val[1]}]
@@ -1509,7 +1558,7 @@ module_eval(<<'.,.,', 'parser.y', 208)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 212)
+module_eval(<<'.,.,', 'parser.y', 221)
def _reduce_55(val, _values, result)
result = val[0].append({tag: val[1], tokens: val[2]})
@@ -1517,14 +1566,14 @@ module_eval(<<'.,.,', 'parser.y', 212)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 215)
+module_eval(<<'.,.,', 'parser.y', 224)
def _reduce_56(val, _values, result)
result = [val[0]]
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 216)
+module_eval(<<'.,.,', 'parser.y', 225)
def _reduce_57(val, _values, result)
result = val[0].append(val[1])
result
@@ -1535,7 +1584,7 @@ module_eval(<<'.,.,', 'parser.y', 216)
# reduce 59 omitted
-module_eval(<<'.,.,', 'parser.y', 223)
+module_eval(<<'.,.,', 'parser.y', 232)
def _reduce_60(val, _values, result)
begin_c_declaration("}")
@@ -1543,7 +1592,7 @@ module_eval(<<'.,.,', 'parser.y', 223)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 227)
+module_eval(<<'.,.,', 'parser.y', 236)
def _reduce_61(val, _values, result)
end_c_declaration
@@ -1551,7 +1600,7 @@ module_eval(<<'.,.,', 'parser.y', 227)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 231)
+module_eval(<<'.,.,', 'parser.y', 240)
def _reduce_62(val, _values, result)
result = val[0].append(val[3])
@@ -1559,7 +1608,7 @@ module_eval(<<'.,.,', 'parser.y', 231)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 235)
+module_eval(<<'.,.,', 'parser.y', 244)
def _reduce_63(val, _values, result)
begin_c_declaration("}")
@@ -1567,7 +1616,7 @@ module_eval(<<'.,.,', 'parser.y', 235)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 239)
+module_eval(<<'.,.,', 'parser.y', 248)
def _reduce_64(val, _values, result)
end_c_declaration
@@ -1575,7 +1624,7 @@ module_eval(<<'.,.,', 'parser.y', 239)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 243)
+module_eval(<<'.,.,', 'parser.y', 252)
def _reduce_65(val, _values, result)
result = [val[2]]
@@ -1583,7 +1632,7 @@ module_eval(<<'.,.,', 'parser.y', 243)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 248)
+module_eval(<<'.,.,', 'parser.y', 257)
def _reduce_66(val, _values, result)
result = [{tag: nil, tokens: val[0]}]
@@ -1591,7 +1640,7 @@ module_eval(<<'.,.,', 'parser.y', 248)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 252)
+module_eval(<<'.,.,', 'parser.y', 261)
def _reduce_67(val, _values, result)
result = [{tag: val[0], tokens: val[1]}]
@@ -1599,22 +1648,22 @@ module_eval(<<'.,.,', 'parser.y', 252)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 256)
+module_eval(<<'.,.,', 'parser.y', 265)
def _reduce_68(val, _values, result)
- result = val[0].append({tag: nil, tokens: val[1]})
+ result = val[0].append({tag: val[1], tokens: val[2]})
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 259)
+module_eval(<<'.,.,', 'parser.y', 268)
def _reduce_69(val, _values, result)
result = [val[0]]
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 260)
+module_eval(<<'.,.,', 'parser.y', 269)
def _reduce_70(val, _values, result)
result = val[0].append(val[1])
result
@@ -1623,16 +1672,16 @@ module_eval(<<'.,.,', 'parser.y', 260)
# reduce 71 omitted
-module_eval(<<'.,.,', 'parser.y', 264)
+module_eval(<<'.,.,', 'parser.y', 273)
def _reduce_72(val, _values, result)
- raise "Ident after %prec" if @prec_seen
+ on_action_error("ident after %prec", val[0]) if @prec_seen
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 265)
+module_eval(<<'.,.,', 'parser.y', 274)
def _reduce_73(val, _values, result)
- raise "Char after %prec" if @prec_seen
+ on_action_error("char after %prec", val[0]) if @prec_seen
result
end
.,.,
@@ -1645,29 +1694,39 @@ module_eval(<<'.,.,', 'parser.y', 265)
# reduce 77 omitted
-module_eval(<<'.,.,', 'parser.y', 275)
+module_eval(<<'.,.,', 'parser.y', 284)
def _reduce_78(val, _values, result)
lhs = val[0]
lhs.alias_name = val[1]
- val[3].each {|hash|
- @grammar.add_rule(lhs: lhs, rhs: hash[:rhs], lineno: hash[:lineno])
- }
+ val[3].each do |builder|
+ builder.lhs = lhs
+ builder.complete_input
+ @grammar.add_rule_builder(builder)
+ end
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 284)
+module_eval(<<'.,.,', 'parser.y', 295)
def _reduce_79(val, _values, result)
- result = [{rhs: val[0], lineno: val[0].first&.line || @lexer.line - 1}]
+ builder = val[0]
+ if !builder.line
+ builder.line = @lexer.line - 1
+ end
+ result = [builder]
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 288)
+module_eval(<<'.,.,', 'parser.y', 303)
def _reduce_80(val, _values, result)
- result = val[0].append({rhs: val[2], lineno: val[2].first&.line || @lexer.line - 1})
+ builder = val[2]
+ if !builder.line
+ builder.line = @lexer.line - 1
+ end
+ result = val[0].append(builder)
result
end
@@ -1675,65 +1734,73 @@ module_eval(<<'.,.,', 'parser.y', 288)
# reduce 81 omitted
-module_eval(<<'.,.,', 'parser.y', 294)
+module_eval(<<'.,.,', 'parser.y', 313)
def _reduce_82(val, _values, result)
reset_precs
- result = []
+ result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 299)
+module_eval(<<'.,.,', 'parser.y', 318)
def _reduce_83(val, _values, result)
reset_precs
- result = []
+ result = Grammar::RuleBuilder.new(@rule_counter, @midrule_action_counter)
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 304)
+module_eval(<<'.,.,', 'parser.y', 323)
def _reduce_84(val, _values, result)
token = val[1]
token.alias_name = val[2]
- result = val[0].append(token)
+ builder = val[0]
+ builder.add_rhs(token)
+ result = builder
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 310)
+module_eval(<<'.,.,', 'parser.y', 331)
def _reduce_85(val, _values, result)
- token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
- result = val[0].append(token)
+ token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[2], location: @lexer.location, args: [val[1]])
+ builder = val[0]
+ builder.add_rhs(token)
+ result = builder
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 315)
+module_eval(<<'.,.,', 'parser.y', 338)
def _reduce_86(val, _values, result)
- token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
- result = val[0].append(token)
+ token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1].s_value, location: @lexer.location, args: [val[3]])
+ builder = val[0]
+ builder.add_rhs(token)
+ result = builder
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 320)
+module_eval(<<'.,.,', 'parser.y', 345)
def _reduce_87(val, _values, result)
- token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1])
- result = val[0].append(token)
+ token = Lrama::Lexer::Token::Parameterizing.new(s_value: val[1].s_value, location: @lexer.location, args: [val[3], val[5]])
+ builder = val[0]
+ builder.add_rhs(token)
+ result = builder
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 325)
+module_eval(<<'.,.,', 'parser.y', 352)
def _reduce_88(val, _values, result)
if @prec_seen
- raise "Multiple User_code after %prec" if @code_after_prec
+ on_action_error("multiple User_code after %prec", val[0]) if @code_after_prec
@code_after_prec = true
end
begin_c_declaration("}")
@@ -1742,7 +1809,7 @@ module_eval(<<'.,.,', 'parser.y', 325)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 333)
+module_eval(<<'.,.,', 'parser.y', 360)
def _reduce_89(val, _values, result)
end_c_declaration
@@ -1750,21 +1817,25 @@ module_eval(<<'.,.,', 'parser.y', 333)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 337)
+module_eval(<<'.,.,', 'parser.y', 364)
def _reduce_90(val, _values, result)
token = val[3]
token.alias_name = val[6]
- result = val[0].append(token)
+ builder = val[0]
+ builder.user_code = token
+ result = builder
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 343)
+module_eval(<<'.,.,', 'parser.y', 372)
def _reduce_91(val, _values, result)
sym = @grammar.find_symbol_by_id!(val[2])
- result = val[0].append(sym)
@prec_seen = true
+ builder = val[0]
+ builder.precedence_sym = sym
+ result = builder
result
end
@@ -1772,19 +1843,25 @@ module_eval(<<'.,.,', 'parser.y', 343)
# reduce 92 omitted
-module_eval(<<'.,.,', 'parser.y', 349)
- def _reduce_93(val, _values, result)
+# reduce 93 omitted
+
+# reduce 94 omitted
+
+# reduce 95 omitted
+
+module_eval(<<'.,.,', 'parser.y', 384)
+ def _reduce_96(val, _values, result)
result = val[1].s_value
result
end
.,.,
-# reduce 94 omitted
+# reduce 97 omitted
-# reduce 95 omitted
+# reduce 98 omitted
-module_eval(<<'.,.,', 'parser.y', 356)
- def _reduce_96(val, _values, result)
+module_eval(<<'.,.,', 'parser.y', 391)
+ def _reduce_99(val, _values, result)
begin_c_declaration('\Z')
@grammar.epilogue_first_lineno = @lexer.line + 1
@@ -1792,8 +1869,8 @@ module_eval(<<'.,.,', 'parser.y', 356)
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 361)
- def _reduce_97(val, _values, result)
+module_eval(<<'.,.,', 'parser.y', 396)
+ def _reduce_100(val, _values, result)
end_c_declaration
@grammar.epilogue = val[2].s_value
@@ -1801,36 +1878,36 @@ module_eval(<<'.,.,', 'parser.y', 361)
end
.,.,
-# reduce 98 omitted
+# reduce 101 omitted
-# reduce 99 omitted
+# reduce 102 omitted
-# reduce 100 omitted
+# reduce 103 omitted
-# reduce 101 omitted
+# reduce 104 omitted
-# reduce 102 omitted
+# reduce 105 omitted
-module_eval(<<'.,.,', 'parser.y', 372)
- def _reduce_103(val, _values, result)
+module_eval(<<'.,.,', 'parser.y', 407)
+ def _reduce_106(val, _values, result)
result = [val[0]]
result
end
.,.,
-module_eval(<<'.,.,', 'parser.y', 373)
- def _reduce_104(val, _values, result)
+module_eval(<<'.,.,', 'parser.y', 408)
+ def _reduce_107(val, _values, result)
result = val[0].append(val[1])
result
end
.,.,
-# reduce 105 omitted
+# reduce 108 omitted
-# reduce 106 omitted
+# reduce 109 omitted
-module_eval(<<'.,.,', 'parser.y', 378)
- def _reduce_107(val, _values, result)
+module_eval(<<'.,.,', 'parser.y', 413)
+ def _reduce_110(val, _values, result)
result = Lrama::Lexer::Token::Ident.new(s_value: val[0])
result
end
diff --git a/tool/lrama/lib/lrama/report/profile.rb b/tool/lrama/lib/lrama/report/profile.rb
index 8265d94c2f..36156800a4 100644
--- a/tool/lrama/lib/lrama/report/profile.rb
+++ b/tool/lrama/lib/lrama/report/profile.rb
@@ -1,18 +1,7 @@
module Lrama
class Report
module Profile
- # 1. Wrap target method with Profile.report_profile like below:
- #
- # Lrama::Report::Profile.report_profile { method }
- #
- # 2. Run lrama command, for example
- #
- # $ ./exe/lrama --trace=time spec/fixtures/integration/ruby_3_2_0/parse.tmp.y
- #
- # 3. Generate html file
- #
- # $ stackprof --d3-flamegraph tmp/stackprof-cpu-myapp.dump > tmp/flamegraph.html
- #
+ # See "Profiling Lrama" in README.md for how to use.
def self.report_profile
require "stackprof"
diff --git a/tool/lrama/lib/lrama/version.rb b/tool/lrama/lib/lrama/version.rb
index 49ff1ec97c..52e9bab2e3 100644
--- a/tool/lrama/lib/lrama/version.rb
+++ b/tool/lrama/lib/lrama/version.rb
@@ -1,3 +1,3 @@
module Lrama
- VERSION = "0.5.9".freeze
+ VERSION = "0.5.10".freeze
end
diff --git a/tool/lrama/template/bison/yacc.c b/tool/lrama/template/bison/yacc.c
index 90ea228e22..0e17b46f25 100644
--- a/tool/lrama/template/bison/yacc.c
+++ b/tool/lrama/template/bison/yacc.c
@@ -69,11 +69,13 @@
<%# b4_user_pre_prologue -%>
+<%- if output.aux.prologue -%>
/* First part of user prologue. */
#line <%= output.aux.prologue_first_lineno %> "<%= output.grammar_file_path %>"
<%= output.aux.prologue %>
#line [@oline@] [@ofile@]
+<%- end -%>
<%# b4_cast_define -%>
# ifndef YY_CAST
@@ -1483,6 +1485,7 @@ YYLTYPE yylloc = yyloc_default;
<%# b4_declare_parser_state_variables -%>
/* Number of syntax errors so far. */
int yynerrs = 0;
+ YY_USE (yynerrs); /* Silence compiler warning. */
yy_state_fast_t yystate = 0;
/* Number of tokens to shift before error messages enabled. */
@@ -2043,7 +2046,9 @@ yyreturnlab:
}
<%# b4_percent_code_get([[epilogue]]) -%>
+<%- if output.aux.epilogue -%>
#line <%= output.aux.epilogue_first_lineno - 1 %> "<%= output.grammar_file_path %>"
<%= output.aux.epilogue -%>
+<%- end -%>
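The new <%- if ... -%> guards mean that grammars without a prologue or epilogue no longer emit the surrounding #line directives for content that does not exist. A self-contained illustration of that ERB pattern (plain ERB, not Lrama's actual output pipeline):

    require "erb"

    template = <<~TMPL
      <%- if prologue -%>
      /* First part of user prologue. */
      <%= prologue %>
      <%- end -%>
      int main(void) { return 0; }
    TMPL

    erb = ERB.new(template, trim_mode: "-")
    puts erb.result_with_hash(prologue: nil)            # only the main() line is emitted
    puts erb.result_with_hash(prologue: "#define X 1")  # prologue block included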