Mirror of https://github.com/rust-lang/rust-analyzer.git
feat(lexer): Allow including frontmatter with 'tokenize'
commit f4d9018a48
parent 3a5e13a8b3
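Context for the diff below: `rustc_lexer::tokenize` now takes a second argument, `rustc_lexer::FrontmatterAllowed`, which controls whether a leading frontmatter block may be lexed as a single frontmatter token. Every call site in this commit passes `FrontmatterAllowed::No` to keep the previous behavior. A minimal sketch of the new call shape (the `Yes` variant is inferred from the enum name and the commit title):

use rustc_lexer::{tokenize, FrontmatterAllowed};

fn lex_all(text: &str) {
    // Before this change: `tokenize(text)`.
    // Now the caller must opt in or out of frontmatter explicitly.
    for token in tokenize(text, FrontmatterAllowed::No) {
        println!("{:?} ({} bytes)", token.kind, token.len);
    }
}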
@@ -11,8 +11,8 @@
 use std::ops;
 
 use rustc_literal_escaper::{
-    EscapeError, Mode, unescape_byte, unescape_byte_str, unescape_c_str, unescape_char,
-    unescape_str,
+    unescape_byte, unescape_byte_str, unescape_c_str, unescape_char, unescape_str, EscapeError,
+    Mode,
 };
 
 use crate::{
@@ -44,7 +44,9 @@ impl<'a> LexedStr<'a> {
 
         // Re-create the tokenizer from scratch every token because `GuardedStrPrefix` is one token in the lexer
         // but we want to split it to two in edition <2024.
-        while let Some(token) = rustc_lexer::tokenize(&text[conv.offset..]).next() {
+        while let Some(token) =
+            rustc_lexer::tokenize(&text[conv.offset..], rustc_lexer::FrontmatterAllowed::No).next()
+        {
             let token_text = &text[conv.offset..][..token.len as usize];
 
             conv.extend_token(&token.kind, token_text);
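The loop above restarts the lexer at `conv.offset` on every iteration rather than reusing one token stream. A minimal sketch of that pattern, separate from rust-analyzer's actual converter: when a token such as `GuardedStrPrefix` is split for editions before 2024, the converter advances by less than `token.len`, and the next `tokenize` call re-lexes the remainder from scratch.

fn lex_with_restarts(text: &str) {
    let mut offset = 0;
    while let Some(token) =
        rustc_lexer::tokenize(&text[offset..], rustc_lexer::FrontmatterAllowed::No).next()
    {
        let token_text = &text[offset..][..token.len as usize];
        // The real converter (`conv.extend_token`) may consume only part of
        // `token_text`; this sketch always takes the whole token.
        let _ = token_text;
        offset += token.len as usize;
    }
}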
@@ -58,7 +60,7 @@ impl<'a> LexedStr<'a> {
             return None;
         }
 
-        let token = rustc_lexer::tokenize(text).next()?;
+        let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next()?;
         if token.len as usize != text.len() {
             return None;
         }
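The hunk above is a single-token check: the input is accepted only when the first lexed token covers the whole string. As a standalone sketch of the same logic:

fn single_token(text: &str) -> Option<rustc_lexer::Token> {
    let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next()?;
    // Reject input that lexes to more than one token.
    if token.len as usize != text.len() {
        return None;
    }
    Some(token)
}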
@@ -121,7 +121,7 @@ pub(super) fn literal_from_str<Span: Copy>(
     use proc_macro::bridge::LitKind;
     use rustc_lexer::{LiteralKind, Token, TokenKind};
 
-    let mut tokens = rustc_lexer::tokenize(s);
+    let mut tokens = rustc_lexer::tokenize(s, rustc_lexer::FrontmatterAllowed::No);
     let minus_or_lit = tokens.next().unwrap_or(Token { kind: TokenKind::Eof, len: 0 });
 
     let lit = if minus_or_lit.kind == TokenKind::Minus {
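For context, `literal_from_str` lexes at most two tokens because a negative literal reaches the proc-macro bridge as a `-` token followed by the literal itself. A hedged sketch of that step (the helper name is hypothetical; the `Token { kind: TokenKind::Eof, len: 0 }` fallback mirrors the hunk above):

use rustc_lexer::{Token, TokenKind};

fn minus_then_literal(s: &str) -> (bool, Token) {
    let mut tokens = rustc_lexer::tokenize(s, rustc_lexer::FrontmatterAllowed::No);
    let minus_or_lit = tokens.next().unwrap_or(Token { kind: TokenKind::Eof, len: 0 });
    if minus_or_lit.kind == TokenKind::Minus {
        // Leading minus: the literal is the next token (Eof if missing).
        (true, tokens.next().unwrap_or(Token { kind: TokenKind::Eof, len: 0 }))
    } else {
        (false, minus_or_lit)
    }
}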
@@ -579,7 +579,7 @@ where
 {
     use rustc_lexer::LiteralKind;
 
-    let token = rustc_lexer::tokenize(text).next_tuple();
+    let token = rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next_tuple();
     let Some((rustc_lexer::Token {
         kind: rustc_lexer::TokenKind::Literal { kind, suffix_start },
         ..
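One note on this last hunk: `next_tuple()` comes from `itertools::Itertools` and collects the leading tokens into a tuple, so the literal token can be destructured in the `let Some((...))` pattern that follows. A minimal sketch, assuming a two-token tuple and the `itertools` dependency:

use itertools::Itertools;

fn first_two_tokens(text: &str) -> Option<(rustc_lexer::Token, rustc_lexer::Token)> {
    // Returns None unless the input yields at least two tokens.
    rustc_lexer::tokenize(text, rustc_lexer::FrontmatterAllowed::No).next_tuple()
}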