mirror of https://github.com/rust-lang/rust-analyzer.git
synced 2025-10-01 11:31:15 +00:00

Cleanup

commit fd8622e1ec
parent faddea9353
@@ -2,7 +2,7 @@
 use parser::{Token, TokenSource};
 use std::cell::{Cell, Ref, RefCell};
-use syntax::{tokenize, SmolStr, SyntaxKind, SyntaxKind::*, T};
+use syntax::{lex_single_syntax_kind, SmolStr, SyntaxKind, SyntaxKind::*, T};
 use tt::buffer::{Cursor, TokenBuffer};
 
 #[derive(Debug, Clone, Eq, PartialEq)]

@@ -155,17 +155,15 @@ fn convert_delim(d: Option<tt::DelimiterKind>, closing: bool) -> TtToken {
 }
 
 fn convert_literal(l: &tt::Literal) -> TtToken {
-    let mut kinds = tokenize(&l.text).0.into_iter().map(|token| token.kind);
+    let is_negated = l.text.starts_with('-');
+    let inner_text = &l.text[if is_negated { 1 } else { 0 }..];
 
-    let kind = match kinds.next() {
-        Some(kind) if kind.is_literal() => Some(kind),
-        Some(SyntaxKind::MINUS) => match kinds.next() {
-            Some(kind) if kind.is_literal() => Some(kind),
-            _ => None,
-        },
-        _ => None,
-    }
-    .unwrap_or_else(|| panic!("Fail to convert given literal {:#?}", &l));
+    let kind = lex_single_syntax_kind(inner_text)
+        .map(|(kind, _error)| kind)
+        .filter(|kind| {
+            kind.is_literal() && (!is_negated || matches!(kind, FLOAT_NUMBER | INT_NUMBER))
+        })
+        .unwrap_or_else(|| panic!("Fail to convert given literal {:#?}", &l));
 
     TtToken { kind, is_joint_to_next: false, text: l.text.clone() }
 }
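
For illustration, a self-contained sketch of the rule the rewritten `convert_literal` enforces: split off a leading `-` first, lex the rest as a single token, and accept the negation only for numeric literals, so `-42` and `-1.5` convert while `-"hi"` would hit the panic in the real code. `Kind`, `lex_one`, and `classify_literal` below are hypothetical stand-ins for `SyntaxKind`, `lex_single_syntax_kind`, and `convert_literal`, not rust-analyzer's API. The remaining hunks are in the lexer itself, where the helper functions were reshuffled.

#[derive(Debug, Clone, Copy, PartialEq)]
enum Kind {
    IntNumber,
    FloatNumber,
    StringLit,
}

// Toy single-token lexer standing in for `lex_single_syntax_kind`.
fn lex_one(text: &str) -> Option<Kind> {
    if !text.is_empty() && text.chars().all(|c| c.is_ascii_digit()) {
        Some(Kind::IntNumber)
    } else if text.parse::<f64>().is_ok() {
        Some(Kind::FloatNumber)
    } else if text.len() >= 2 && text.starts_with('"') && text.ends_with('"') {
        Some(Kind::StringLit)
    } else {
        None
    }
}

// Same shape as the new `convert_literal`: strip the sign, lex the rest,
// and keep the result only when negation makes sense for the kind.
fn classify_literal(text: &str) -> Option<Kind> {
    let is_negated = text.starts_with('-');
    let inner = &text[if is_negated { 1 } else { 0 }..];
    lex_one(inner)
        .filter(|kind| !is_negated || matches!(*kind, Kind::IntNumber | Kind::FloatNumber))
}

fn main() {
    assert_eq!(classify_literal("42"), Some(Kind::IntNumber));
    assert_eq!(classify_literal("-1.5"), Some(Kind::FloatNumber));
    assert_eq!(classify_literal("\"hi\""), Some(Kind::StringLit));
    assert_eq!(classify_literal("-\"hi\""), None); // negation only applies to numbers
    println!("literal checks passed");
}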

@@ -1,10 +1,10 @@
 //! Lexer analyzes raw input string and produces lexemes (tokens).
 //! It is just a bridge to `rustc_lexer`.
 
-use rustc_lexer::{LiteralKind as LK, RawStrError};
-
 use std::convert::TryInto;
 
+use rustc_lexer::{LiteralKind as LK, RawStrError};
+
 use crate::{
     SyntaxError,
     SyntaxKind::{self, *},

@@ -61,17 +61,18 @@ pub fn tokenize(text: &str) -> (Vec<Token>, Vec<SyntaxError>) {
     (tokens, errors)
 }
 
-/// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
-/// encountered at the beginning of the string.
+/// Returns `SyntaxKind` and `Option<SyntaxError>` if `text` parses as a single token.
 ///
 /// Returns `None` if the string contains zero *or two or more* tokens.
 /// The token is malformed if the returned error is not `None`.
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
-    lex_first_token(text)
-        .filter(|(token, _)| token.len == TextSize::of(text))
-        .map(|(token, error)| (token.kind, error))
+    let (first_token, err) = lex_first_token(text)?;
+    if first_token.len != TextSize::of(text) {
+        return None;
+    }
+    Some((first_token.kind, err))
 }
 
 /// The same as `lex_single_syntax_kind()` but returns only `SyntaxKind` and
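
As a sketch of the contract the rewritten `lex_single_syntax_kind` keeps: the first token must span the whole input, otherwise the function answers `None`, and any lexer error travels alongside the kind. The whitespace-based `lex_first_token` below is a hypothetical stand-in so the example runs on its own.

#[derive(Debug, PartialEq)]
struct Token {
    kind: &'static str,
    len: usize,
}

// Toy first-token lexer: a token is a maximal run of non-whitespace characters.
fn lex_first_token(text: &str) -> Option<(Token, Option<String>)> {
    let first: String = text.chars().take_while(|c| !c.is_whitespace()).collect();
    if first.is_empty() {
        return None;
    }
    let kind = if first.chars().all(|c| c.is_ascii_digit()) { "INT_NUMBER" } else { "IDENT" };
    Some((Token { kind, len: first.len() }, None))
}

// Mirrors the rewritten function: early-return with `?`, then a plain
// length check instead of the old `filter`/`map` chain.
fn lex_single_kind(text: &str) -> Option<(&'static str, Option<String>)> {
    let (first_token, err) = lex_first_token(text)?;
    if first_token.len != text.len() {
        return None;
    }
    Some((first_token.kind, err))
}

fn main() {
    assert_eq!(lex_single_kind("42"), Some(("INT_NUMBER", None)));
    assert_eq!(lex_single_kind("foo bar"), None); // two tokens
    assert_eq!(lex_single_kind(""), None); // zero tokens
    println!("single-token contract holds");
}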
@@ -79,9 +80,11 @@ pub fn lex_single_syntax_kind(text: &str) -> Option<(SyntaxKind, Option<SyntaxError>)> {
 ///
 /// Beware that unescape errors are not checked at tokenization time.
 pub fn lex_single_valid_syntax_kind(text: &str) -> Option<SyntaxKind> {
-    lex_first_token(text)
-        .filter(|(token, error)| !error.is_some() && token.len == TextSize::of(text))
-        .map(|(token, _error)| token.kind)
+    let (single_token, err) = lex_single_syntax_kind(text)?;
+    if err.is_some() {
+        return None;
+    }
+    Some(single_token)
 }
 
 /// Returns `SyntaxKind` and `Option<SyntaxError>` of the first token
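
Similarly, the rewritten `lex_single_valid_syntax_kind` now layers on top of `lex_single_syntax_kind` instead of filtering `lex_first_token` itself: it forwards `None` and additionally rejects tokens that lexed with an error. A self-contained sketch, with a hypothetical stand-in helper:

type LexError = String;

// Stand-in for `lex_single_syntax_kind`: "bad" lexes, but with an error.
fn single_kind(text: &str) -> Option<(&'static str, Option<LexError>)> {
    match text {
        "42" => Some(("INT_NUMBER", None)),
        "bad" => Some(("STRING", Some("unterminated string".to_string()))),
        _ => None,
    }
}

// Mirrors the rewritten function: delegate, then drop malformed tokens.
fn single_valid_kind(text: &str) -> Option<&'static str> {
    let (kind, err) = single_kind(text)?;
    if err.is_some() {
        return None;
    }
    Some(kind)
}

fn main() {
    assert_eq!(single_valid_kind("42"), Some("INT_NUMBER"));
    assert_eq!(single_valid_kind("bad"), None); // token exists but is malformed
    assert_eq!(single_valid_kind("a b"), None); // not a single token
    println!("valid-token contract holds");
}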