author    Martin Emde <martin.emde@gmail.com>  2023-11-29 20:02:43 -0800
committer git <svn-admin@ruby-lang.org>        2023-11-30 14:10:04 +0000
commit    aac8be803409a18f6c32b438d154432eeb6f49e8 (patch)
tree      9543753bc0eaf5edd85bae3f004c484f33e73f98 /lib/prism
parent    1802d14ca8924bd67e0915c5ad9f1fad5dba0602 (diff)
[ruby/prism] Fix lex_compat for `<<HEREDOC # comment` at EOF
Fixes https://github.com/ruby/prism/pull/1874

https://github.com/ruby/prism/commit/304dd78dd2
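
A minimal reproduction sketch of the scenario in the title (not part of the commit; the input string and the calls to Ripper.lex and Prism.lex_compat are illustrative assumptions): a heredoc whose opener line carries a trailing comment, with nothing after the terminator. With this fix, lex_compat is expected to produce the same token stream Ripper does for such input.

  require "prism"
  require "ripper"

  # Heredoc opener followed by a trailing comment; the terminator ends the file.
  source = "<<HEREDOC # comment\nHEREDOC\n"

  # Ripper's token stream and prism's Ripper-compatible stream; after this
  # change the two dumps should line up token for token.
  pp Ripper.lex(source)
  pp Prism.lex_compat(source).value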
Diffstat (limited to 'lib/prism')
-rw-r--r--  lib/prism/lex_compat.rb | 31
1 file changed, 23 insertions(+), 8 deletions(-)
diff --git a/lib/prism/lex_compat.rb b/lib/prism/lex_compat.rb
index 66be275bcd..0336f48d6d 100644
--- a/lib/prism/lex_compat.rb
+++ b/lib/prism/lex_compat.rb
@@ -729,16 +729,31 @@ module Prism
             # comment and there is still whitespace after the comment, then
             # Ripper will append a on_nl token (even though there isn't
             # necessarily a newline). We mirror that here.
-            start_offset = previous_token.location.end_offset
-            end_offset = token.location.start_offset
-
-            if previous_token.type == :COMMENT && start_offset < end_offset
-              if bom
-                start_offset += 3
-                end_offset += 3
+            if previous_token.type == :COMMENT
+              # If the token before the comment was a heredoc end, then
+              # the comment's end_offset is before the heredoc end token.
+              # This is not the correct offset to use for figuring out if
+              # there is trailing whitespace after the comment.
+              # Use the end_offset of the heredoc end instead.
+              before_comment = result_value[index - 2]
+              before_comment &&= before_comment[0]
+
+              if before_comment&.type == :HEREDOC_END
+                start_offset = before_comment.location.end_offset
+              else
+                start_offset = previous_token.location.end_offset
               end

-              tokens << Token.new([[lineno, 0], :on_nl, source.byteslice(start_offset...end_offset), lex_state])
+              end_offset = token.location.start_offset
+
+              if start_offset < end_offset
+                if bom
+                  start_offset += 3
+                  end_offset += 3
+                end
+
+                tokens << Token.new([[lineno, 0], :on_nl, source.byteslice(start_offset...end_offset), lex_state])
+              end
             end

             Token.new([[lineno, column], event, value, lex_state])
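
For context on why the new branch looks two tokens back (result_value[index - 2]): a hedged sketch, assuming Prism.lex exposes the same raw [token, lex_state] pairs that result_value holds above, that dumps the token order and byte offsets for an input shaped like the one in the title. The COMMENT from the opener line is expected to appear after the HEREDOC_END in this stream even though it sits earlier in the source bytes, which is why the comment's end_offset is not a safe starting point when checking for trailing whitespace before EOF.

  require "prism"

  # Dump prism's raw token order and byte offsets for the problematic input shape.
  Prism.lex("<<HEREDOC # comment\nHEREDOC\n").value.each do |token, _lex_state|
    location = token.location
    puts format("%-13s %2d...%-2d %p", token.type, location.start_offset, location.end_offset, token.value)
  end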