Discard empty lines at tokenizer level
commit c1eaae4b26
parent eb4d0a3735
@@ -27,10 +27,7 @@ const parseHeader = (tokens: Token[]): [Header, Token[]] => {
   while (tokens[0]) {
     const token = tokens[0];
-    if (token.type === "text" && token.value === "") {
-      // Ignore empty lines before header
-      tokens.shift();
-    } else if (token.type === "header" && token.value === "Name") {
+    if (token.type === "header" && token.value === "Name") {
       tokens.shift();
       const [name, rest] = extractText(tokens);
       header.name = name;
       tokens = rest;
@@ -73,9 +70,6 @@ const parseIntervalComments = (tokens: Token[]): [Comment[], Token[]] => {
         text: text.value,
       });
       tokens = rest;
-    } else if (start.type === "text" && start.value === "") {
-      // skip empty lines
-      tokens.shift();
     } else {
       break;
     }
@@ -135,8 +129,6 @@ const parseIntervals = (tokens: Token[]): Interval[] => {
       comments,
     });
     tokens = rest;
-  } else if (token.type === "text" && token.value === "") {
-    // Ignore empty lines
   } else {
     throw new ParseError(`Unexpected token ${tokenToString(token)}`, token.loc);
   }
@@ -150,19 +150,38 @@ const tokenizeLabeledLine = (line: string, row: number): Token[] | undefined =>
   throw new ParseError(`Unknown label "${label}:"`, { row, col: 0 });
 };
 
-const tokenizeText = (line: string, row: number): TextToken[] => {
+const tokenizeText = (line: string, row: number, afterDescription: boolean): TextToken[] => {
+  if (!afterDescription && line.trim() === "") {
+    // Ignore empty lines in most cases.
+    // They're only significant inside a description.
+    return [];
+  }
   return [{ type: "text", value: line.trim(), loc: { row, col: 0 } }];
 };
 
-const tokenizeRule = (line: string, row: number): Token[] => {
-  return tokenizeLabeledLine(line, row) || tokenizeComment(line, row) || tokenizeText(line, row);
+const tokenizeRule = (line: string, row: number, afterDescription: boolean): Token[] => {
+  return tokenizeLabeledLine(line, row) || tokenizeComment(line, row) || tokenizeText(line, row, afterDescription);
 };
 
+// True when the last token is "Description:", optionally followed by any number of text tokens.
+const isAfterDescription = (tokens: Token[]): boolean => {
+  for (let i = tokens.length - 1; i >= 0; i--) {
+    const token = tokens[i];
+    if (token.type === "text") {
+      continue; // skip trailing text tokens
+    }
+    if (token.type === "header") {
+      return token.value === "Description";
+    }
+  }
+  return false;
+};
+
 export const tokenize = (file: string): Token[] => {
   const tokens: Token[] = [];
 
   file.split("\n").map((line, row) => {
-    tokens.push(...tokenizeRule(line, row));
+    tokens.push(...tokenizeRule(line, row, isAfterDescription(tokens)));
   });
 
   return tokens;
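For illustration, a small usage sketch of the new behavior. This is not part of the commit: the module path "./tokenizer" and the sample input format are assumptions.

// Hypothetical usage sketch -- assumes the file above is importable as "./tokenizer".
import { tokenize } from "./tokenizer";

const input = [
  "Name:",
  "Morning ride",
  "",                  // row 2: discarded, not inside a description
  "Description:",
  "First paragraph.",
  "",                  // row 5: kept, empty lines are significant after "Description:"
  "Second paragraph.",
].join("\n");

const tokens = tokenize(input);
// The row-2 blank line yields no token at all, while the row-5 blank line
// survives as { type: "text", value: "", loc: { row: 5, col: 0 } },
// which lets the parser split the description into paragraphs.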