Regular expression compatibility issue

Branch: main
Author: 一杯沧海, 1 year ago
parent 53a6a6f926
commit dbbe3f9af3
17 changed files (changed lines in parentheses):

  1. src/utils/marked/marked.scss (2)
  2. src/utils/marked/marked.ts (3)
  3. src/utils/marked/src/Hooks.ts (29)
  4. src/utils/marked/src/Instance.ts (406)
  5. src/utils/marked/src/Lexer.ts (516)
  6. src/utils/marked/src/MarkedOptions.ts (217)
  7. src/utils/marked/src/Parser.ts (281)
  8. src/utils/marked/src/Renderer.ts (169)
  9. src/utils/marked/src/Slugger.ts (51)
  10. src/utils/marked/src/TextRenderer.ts (42)
  11. src/utils/marked/src/Tokenizer.ts (813)
  12. src/utils/marked/src/Tokens.ts (201)
  13. src/utils/marked/src/defaults.ts (35)
  14. src/utils/marked/src/helpers.ts (265)
  15. src/utils/marked/src/marked.ts (143)
  16. src/utils/marked/src/rules.ts (384)
  17. src/utils/marked/utils.ts (2)

src/utils/marked/marked.scss
@@ -138,7 +138,7 @@
 .image {
   display: inline-block;
-  max-width: 100%;
+  width: 100%;
   height: auto;
 }

src/utils/marked/marked.ts
@@ -1,9 +1,10 @@
-import {Token, Tokens, lexer} from "marked"
 import {Attributes, ComponentType, createElement, ReactElement, ReactNode} from "react"
 import {View, Text, Image, Video, Audio} from "@tarojs/components"
 import {parseHeadingId, walkTokens, unescape} from "./utils"
 import TabLink from "./components/Tablink";
 import './marked.scss'
+import {Token, Tokens } from "./src/Tokens";
+import { lexer } from "./src/marked";
 /**
  * Markdown
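The hunk above swaps the npm marked import for the vendored copy under src/utils/marked/src (added in the hunks below), presumably so its regular expressions can be adjusted for the target runtime, as the commit title suggests. A minimal sketch of how the swapped-in exports are consumed, assuming lexer keeps the upstream marked call signature (the input string is illustrative):

```ts
import { lexer } from "./src/marked";
import type { Token } from "./src/Tokens";

// Tokenize a markdown string with the vendored lexer; this module then maps
// the resulting tokens to Taro View/Text/Image elements.
const tokens: Token[] = lexer("# Hello **world**");
```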

src/utils/marked/src/Hooks.ts
@@ -0,0 +1,29 @@
import { _defaults } from './defaults';
import type { MarkedOptions } from './MarkedOptions';
export class _Hooks {
options: MarkedOptions;
constructor(options?: MarkedOptions) {
this.options = options || _defaults;
}
static passThroughHooks = new Set([
'preprocess',
'postprocess'
]);
/**
* Process markdown before marked
*/
preprocess(markdown: string) {
return markdown;
}
/**
* Process HTML after marked is finished
*/
postprocess(html: string) {
return html;
}
}
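_Hooks only ships the two pass-through hooks shown above; callers provide real implementations through Marked#use in Instance.ts below. A minimal sketch, assuming the vendored Marked class from ./Instance (the front-matter regex is illustrative):

```ts
import { Marked } from "./Instance";

const md = new Marked({
  hooks: {
    // preprocess: strip a leading front-matter block before lexing
    preprocess: (markdown) => markdown.replace(/^---[\s\S]*?\n---\n/, ""),
    // postprocess: wrap the finished HTML
    postprocess: (html) => `<article>${html}</article>`
  }
});
```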

src/utils/marked/src/Instance.ts
@@ -0,0 +1,406 @@
import { _getDefaults } from './defaults';
import { _Lexer } from './Lexer';
import { _Parser } from './Parser';
import { _Hooks } from './Hooks';
import { _Renderer } from './Renderer';
import { _Tokenizer } from './Tokenizer';
import { _TextRenderer } from './TextRenderer';
import { _Slugger } from './Slugger';
import {
checkDeprecations,
escape
} from './helpers';
import type { MarkedExtension, MarkedOptions } from './MarkedOptions';
import type { Token, Tokens, TokensList } from './Tokens';
export type ResultCallback = (error: Error | null, parseResult?: string) => undefined | void;
type UnknownFunction = (...args: unknown[]) => unknown;
type GenericRendererFunction = (...args: unknown[]) => string | false;
export class Marked {
defaults = _getDefaults();
options = this.setOptions;
parse = this.#parseMarkdown(_Lexer.lex, _Parser.parse);
parseInline = this.#parseMarkdown(_Lexer.lexInline, _Parser.parseInline);
Parser = _Parser;
parser = _Parser.parse;
Renderer = _Renderer;
TextRenderer = _TextRenderer;
Lexer = _Lexer;
lexer = _Lexer.lex;
Tokenizer = _Tokenizer;
Slugger = _Slugger;
Hooks = _Hooks;
constructor(...args: MarkedExtension[]) {
this.use(...args);
}
/**
* Run callback for every token
*/
walkTokens <T = void>(tokens: Token[] | TokensList, callback: (token: Token) => T | T[]) {
let values: T[] = [];
for (const token of tokens) {
values = values.concat(callback.call(this, token));
switch (token.type) {
case 'table': {
const tableToken = token as Tokens.Table;
for (const cell of tableToken.header) {
values = values.concat(this.walkTokens(cell.tokens, callback));
}
for (const row of tableToken.rows) {
for (const cell of row) {
values = values.concat(this.walkTokens(cell.tokens, callback));
}
}
break;
}
case 'list': {
const listToken = token as Tokens.List;
values = values.concat(this.walkTokens(listToken.items, callback));
break;
}
default: {
const genericToken = token as Tokens.Generic;
if (this.defaults.extensions?.childTokens?.[genericToken.type]) {
this.defaults.extensions.childTokens[genericToken.type].forEach((childTokens) => {
values = values.concat(this.walkTokens(genericToken[childTokens], callback));
});
} else if (genericToken.tokens) {
values = values.concat(this.walkTokens(genericToken.tokens, callback));
}
}
}
}
return values;
}
use(...args: MarkedExtension[]) {
const extensions: MarkedOptions['extensions'] = this.defaults.extensions || { renderers: {}, childTokens: {} };
args.forEach((pack) => {
// copy options to new object
const opts = { ...pack } as MarkedOptions;
// set async to true if it was set to true before
opts.async = this.defaults.async || opts.async || false;
// ==-- Parse "addon" extensions --== //
if (pack.extensions) {
pack.extensions.forEach((ext) => {
if (!ext.name) {
throw new Error('extension name required');
}
if ('renderer' in ext) { // Renderer extensions
const prevRenderer = extensions.renderers[ext.name];
if (prevRenderer) {
// Replace extension with func to run new extension but fall back if false
extensions.renderers[ext.name] = function(...args) {
let ret = ext.renderer.apply(this, args);
if (ret === false) {
ret = prevRenderer.apply(this, args);
}
return ret;
};
} else {
extensions.renderers[ext.name] = ext.renderer;
}
}
if ('tokenizer' in ext) { // Tokenizer Extensions
if (!ext.level || (ext.level !== 'block' && ext.level !== 'inline')) {
throw new Error("extension level must be 'block' or 'inline'");
}
const extLevel = extensions[ext.level];
if (extLevel) {
extLevel.unshift(ext.tokenizer);
} else {
extensions[ext.level] = [ext.tokenizer];
}
if (ext.start) { // Function to check for start of token
if (ext.level === 'block') {
if (extensions.startBlock) {
extensions.startBlock.push(ext.start);
} else {
extensions.startBlock = [ext.start];
}
} else if (ext.level === 'inline') {
if (extensions.startInline) {
extensions.startInline.push(ext.start);
} else {
extensions.startInline = [ext.start];
}
}
}
}
if ('childTokens' in ext && ext.childTokens) { // Child tokens to be visited by walkTokens
extensions.childTokens[ext.name] = ext.childTokens;
}
});
opts.extensions = extensions;
}
// ==-- Parse "overwrite" extensions --== //
if (pack.renderer) {
const renderer = this.defaults.renderer || new _Renderer(this.defaults);
for (const prop in pack.renderer) {
const rendererFunc = pack.renderer[prop as keyof MarkedExtension['renderer']] as GenericRendererFunction;
const rendererKey = prop as keyof _Renderer;
const prevRenderer = renderer[rendererKey] as GenericRendererFunction;
// Replace renderer with func to run extension, but fall back if false
renderer[rendererKey] = (...args: unknown[]) => {
let ret = rendererFunc.apply(renderer, args);
if (ret === false) {
ret = prevRenderer.apply(renderer, args);
}
return ret || '';
};
}
opts.renderer = renderer;
}
if (pack.tokenizer) {
const tokenizer = this.defaults.tokenizer || new _Tokenizer(this.defaults);
for (const prop in pack.tokenizer) {
const tokenizerFunc = pack.tokenizer[prop as keyof MarkedExtension['tokenizer']] as UnknownFunction;
const tokenizerKey = prop as keyof _Tokenizer;
const prevTokenizer = tokenizer[tokenizerKey] as UnknownFunction;
// Replace tokenizer with func to run extension, but fall back if false
tokenizer[tokenizerKey] = (...args: unknown[]) => {
let ret = tokenizerFunc.apply(tokenizer, args);
if (ret === false) {
ret = prevTokenizer.apply(tokenizer, args);
}
return ret;
};
}
opts.tokenizer = tokenizer;
}
// ==-- Parse Hooks extensions --== //
if (pack.hooks) {
const hooks = this.defaults.hooks || new _Hooks();
for (const prop in pack.hooks) {
const hooksFunc = pack.hooks[prop as keyof MarkedExtension['hooks']] as UnknownFunction;
const hooksKey = prop as keyof _Hooks;
const prevHook = hooks[hooksKey] as UnknownFunction;
if (_Hooks.passThroughHooks.has(prop)) {
hooks[hooksKey as 'preprocess' | 'postprocess'] = (arg: string | undefined) => {
if (this.defaults.async) {
return Promise.resolve(hooksFunc.call(hooks, arg)).then(ret => {
return prevHook.call(hooks, ret) as string;
});
}
const ret = hooksFunc.call(hooks, arg);
return prevHook.call(hooks, ret) as string;
};
} else {
hooks[hooksKey] = (...args: unknown[]) => {
let ret = hooksFunc.apply(hooks, args);
if (ret === false) {
ret = prevHook.apply(hooks, args);
}
return ret as string;
};
}
}
opts.hooks = hooks;
}
// ==-- Parse WalkTokens extensions --== //
if (pack.walkTokens) {
const walkTokens = this.defaults.walkTokens;
const packWalktokens = pack.walkTokens;
opts.walkTokens = function(token) {
let values: Array<Promise<void> | void | unknown> = [];
values.push(packWalktokens.call(this, token));
if (walkTokens) {
values = values.concat(walkTokens.call(this, token));
}
return values;
};
}
this.defaults = { ...this.defaults, ...opts };
});
return this;
}
setOptions(opt: MarkedOptions) {
this.defaults = { ...this.defaults, ...opt };
return this;
}
#parseMarkdown(lexer: (src: string, options?: MarkedOptions) => TokensList | Token[], parser: (tokens: Token[], options?: MarkedOptions) => string) {
return (src: string, optOrCallback?: MarkedOptions | ResultCallback | undefined | null, callback?: ResultCallback | undefined): string | Promise<string | undefined> | undefined => {
if (typeof optOrCallback === 'function') {
callback = optOrCallback;
optOrCallback = null;
}
const origOpt = { ...optOrCallback };
const opt = { ...this.defaults, ...origOpt };
// Show warning if an extension set async to true but the parse was called with async: false
if (this.defaults.async === true && origOpt.async === false) {
if (!opt.silent) {
console.warn('marked(): The async option was set to true by an extension. The async: false option sent to parse will be ignored.');
}
opt.async = true;
}
const throwError = this.#onError(!!opt.silent, !!opt.async, callback);
// throw error in case of non string input
if (typeof src === 'undefined' || src === null) {
return throwError(new Error('marked(): input parameter is undefined or null'));
}
if (typeof src !== 'string') {
return throwError(new Error('marked(): input parameter is of type '
+ Object.prototype.toString.call(src) + ', string expected'));
}
checkDeprecations(opt, callback);
if (opt.hooks) {
opt.hooks.options = opt;
}
if (callback) {
const resultCallback = callback;
const highlight = opt.highlight;
let tokens: TokensList | Token[];
try {
if (opt.hooks) {
src = opt.hooks.preprocess(src) as string;
}
tokens = lexer(src, opt);
} catch (e) {
return throwError(e as Error);
}
const done = (err?: Error) => {
let out;
if (!err) {
try {
if (opt.walkTokens) {
this.walkTokens(tokens, opt.walkTokens);
}
out = parser(tokens, opt);
if (opt.hooks) {
out = opt.hooks.postprocess(out) as string;
}
} catch (e) {
err = e as Error;
}
}
opt.highlight = highlight;
return err
? throwError(err)
: resultCallback(null, out) as undefined;
};
if (!highlight || highlight.length < 3) {
return done();
}
delete opt.highlight;
if (!tokens.length) return done();
let pending = 0;
this.walkTokens(tokens, (token) => {
if (token.type === 'code') {
pending++;
setTimeout(() => {
highlight(token.text, token.lang, (err, code) => {
if (err) {
return done(err);
}
if (code != null && code !== token.text) {
token.text = code;
token.escaped = true;
}
pending--;
if (pending === 0) {
done();
}
});
}, 0);
}
});
if (pending === 0) {
done();
}
return;
}
if (opt.async) {
return Promise.resolve(opt.hooks ? opt.hooks.preprocess(src) : src)
.then(src => lexer(src, opt))
.then(tokens => opt.walkTokens ? Promise.all(this.walkTokens(tokens, opt.walkTokens)).then(() => tokens) : tokens)
.then(tokens => parser(tokens, opt))
.then(html => opt.hooks ? opt.hooks.postprocess(html) : html)
.catch(throwError);
}
try {
if (opt.hooks) {
src = opt.hooks.preprocess(src) as string;
}
const tokens = lexer(src, opt);
if (opt.walkTokens) {
this.walkTokens(tokens, opt.walkTokens);
}
let html = parser(tokens, opt);
if (opt.hooks) {
html = opt.hooks.postprocess(html) as string;
}
return html;
} catch (e) {
return throwError(e as Error);
}
};
}
#onError(silent: boolean, async: boolean, callback?: ResultCallback) {
return (e: Error): string | Promise<string> | undefined => {
e.message += '\nPlease report this to https://github.com/markedjs/marked.';
if (silent) {
const msg = '<p>An error occurred:</p><pre>'
+ escape(e.message + '', true)
+ '</pre>';
if (async) {
return Promise.resolve(msg);
}
if (callback) {
callback(null, msg);
return;
}
return msg;
}
if (async) {
return Promise.reject(e);
}
if (callback) {
callback(e);
return;
}
throw e;
};
}
}
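Usage sketch for the Marked class above: register a walkTokens extension and parse synchronously. The string cast reflects parse's union return type when neither async nor a callback is used; the heading tweak is purely illustrative.

```ts
import { Marked } from "./Instance";
import type { Tokens } from "./Tokens";

const marked = new Marked({
  walkTokens(token) {
    // Demote every heading one level before it is rendered.
    if (token.type === "heading") {
      const heading = token as Tokens.Heading;
      heading.depth = Math.min(6, heading.depth + 1);
    }
  }
});

const html = marked.parse("# Title\n\nSome *markdown*.") as string;
```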

src/utils/marked/src/Lexer.ts
@@ -0,0 +1,516 @@
import { _Tokenizer } from './Tokenizer';
import { _defaults } from './defaults';
import { block, inline } from './rules';
import type { Token, TokensList, Tokens } from './Tokens';
import type { MarkedOptions, TokenizerExtension } from './MarkedOptions';
import type { Rules } from './rules';
/**
* smartypants text replacement
*/
function smartypants(text: string) {
return text
// em-dashes
.replace(/---/g, '\u2014')
// en-dashes
.replace(/--/g, '\u2013')
// opening singles
.replace(/(^|[-\u2014/(\[{"\s])'/g, '$1\u2018')
// closing singles & apostrophes
.replace(/'/g, '\u2019')
// opening doubles
.replace(/(^|[-\u2014/(\[{\u2018\s])"/g, '$1\u201c')
// closing doubles
.replace(/"/g, '\u201d')
// ellipses
.replace(/\.{3}/g, '\u2026');
}
/**
* mangle email addresses
*/
function mangle(text: string) {
let out = '';
for (let i = 0; i < text.length; i++) {
const ch = Math.random() > 0.5
? 'x' + text.charCodeAt(i).toString(16)
: text.charCodeAt(i).toString();
out += '&#' + ch + ';';
}
return out;
}
/**
* Block Lexer
*/
export class _Lexer {
tokens: TokensList;
options: MarkedOptions;
state: {
inLink: boolean;
inRawBlock: boolean;
top: boolean;
};
private tokenizer: _Tokenizer;
private inlineQueue: {src: string, tokens: Token[]}[];
constructor(options?: MarkedOptions) {
// TokenList cannot be created in one go
// @ts-expect-error
this.tokens = [];
this.tokens.links = Object.create(null);
this.options = options || _defaults;
this.options.tokenizer = this.options.tokenizer || new _Tokenizer();
this.tokenizer = this.options.tokenizer;
this.tokenizer.options = this.options;
this.tokenizer.lexer = this;
this.inlineQueue = [];
this.state = {
inLink: false,
inRawBlock: false,
top: true
};
const rules = {
block: block.normal,
inline: inline.normal
};
if (this.options.pedantic) {
rules.block = block.pedantic;
rules.inline = inline.pedantic;
} else if (this.options.gfm) {
rules.block = block.gfm;
if (this.options.breaks) {
rules.inline = inline.breaks;
} else {
rules.inline = inline.gfm;
}
}
this.tokenizer.rules = rules;
}
/**
* Expose Rules
*/
static get rules(): Rules {
return {
block,
inline
};
}
/**
* Static Lex Method
*/
static lex(src: string, options?: MarkedOptions) {
const lexer = new _Lexer(options);
return lexer.lex(src);
}
/**
* Static Lex Inline Method
*/
static lexInline(src: string, options?: MarkedOptions) {
const lexer = new _Lexer(options);
return lexer.inlineTokens(src);
}
/**
* Preprocessing
*/
lex(src: string) {
src = src
.replace(/\r\n|\r/g, '\n');
this.blockTokens(src, this.tokens);
let next;
while (next = this.inlineQueue.shift()) {
this.inlineTokens(next.src, next.tokens);
}
return this.tokens;
}
/**
* Lexing
*/
blockTokens(src: string, tokens?: Token[]): Token[];
blockTokens(src: string, tokens?: TokensList): TokensList;
blockTokens(src: string, tokens: Token[] = []) {
if (this.options.pedantic) {
src = src.replace(/\t/g, ' ').replace(/^ +$/gm, '');
} else {
src = src.replace(/^( *)(\t+)/gm, (_, leading, tabs) => {
return leading + ' '.repeat(tabs.length);
});
}
let token: Tokens.Generic | undefined;
let lastToken;
let cutSrc;
let lastParagraphClipped;
while (src) {
if (this.options.extensions
&& this.options.extensions.block
&& this.options.extensions.block.some((extTokenizer: TokenizerExtension['tokenizer']) => {
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
src = src.substring(token.raw.length);
tokens.push(token);
return true;
}
return false;
})) {
continue;
}
// newline
if (token = this.tokenizer.space(src)) {
src = src.substring(token.raw.length);
if (token.raw.length === 1 && tokens.length > 0) {
// if there's a single \n as a spacer, it's terminating the last line,
// so move it there so that we don't get unnecessary paragraph tags
tokens[tokens.length - 1].raw += '\n';
} else {
tokens.push(token);
}
continue;
}
// code
if (token = this.tokenizer.code(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
// An indented code block cannot interrupt a paragraph.
if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
continue;
}
// fences
if (token = this.tokenizer.fences(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// heading
if (token = this.tokenizer.heading(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// hr
if (token = this.tokenizer.hr(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// blockquote
if (token = this.tokenizer.blockquote(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// list
if (token = this.tokenizer.list(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// html
if (token = this.tokenizer.html(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// def
if (token = this.tokenizer.def(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && (lastToken.type === 'paragraph' || lastToken.type === 'text')) {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.raw;
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else if (!this.tokens.links[token.tag]) {
this.tokens.links[token.tag] = {
href: token.href,
title: token.title
};
}
continue;
}
// table (gfm)
if (token = this.tokenizer.table(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// lheading
if (token = this.tokenizer.lheading(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// top-level paragraph
// prevent paragraph consuming extensions by clipping 'src' to extension start
cutSrc = src;
if (this.options.extensions && this.options.extensions.startBlock) {
let startIndex = Infinity;
const tempSrc = src.slice(1);
let tempStart;
this.options.extensions.startBlock.forEach((getStartIndex) => {
tempStart = getStartIndex.call({ lexer: this }, tempSrc);
if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
});
if (startIndex < Infinity && startIndex >= 0) {
cutSrc = src.substring(0, startIndex + 1);
}
}
if (this.state.top && (token = this.tokenizer.paragraph(cutSrc))) {
lastToken = tokens[tokens.length - 1];
if (lastParagraphClipped && lastToken.type === 'paragraph') {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue.pop();
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
lastParagraphClipped = (cutSrc.length !== src.length);
src = src.substring(token.raw.length);
continue;
}
// text
if (token = this.tokenizer.text(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && lastToken.type === 'text') {
lastToken.raw += '\n' + token.raw;
lastToken.text += '\n' + token.text;
this.inlineQueue.pop();
this.inlineQueue[this.inlineQueue.length - 1].src = lastToken.text;
} else {
tokens.push(token);
}
continue;
}
if (src) {
const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
if (this.options.silent) {
console.error(errMsg);
break;
} else {
throw new Error(errMsg);
}
}
}
this.state.top = true;
return tokens;
}
inline(src: string, tokens: Token[] = []) {
this.inlineQueue.push({ src, tokens });
return tokens;
}
/**
* Lexing/Compiling
*/
inlineTokens(src: string, tokens: Token[] = []): Token[] {
let token, lastToken, cutSrc;
// String with links masked to avoid interference with em and strong
let maskedSrc = src;
let match;
let keepPrevChar, prevChar;
// Mask out reflinks
if (this.tokens.links) {
const links = Object.keys(this.tokens.links);
if (links.length > 0) {
while ((match = this.tokenizer.rules.inline.reflinkSearch.exec(maskedSrc)) != null) {
if (links.includes(match[0].slice(match[0].lastIndexOf('[') + 1, -1))) {
maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.reflinkSearch.lastIndex);
}
}
}
}
// Mask out other blocks
while ((match = this.tokenizer.rules.inline.blockSkip.exec(maskedSrc)) != null) {
maskedSrc = maskedSrc.slice(0, match.index) + '[' + 'a'.repeat(match[0].length - 2) + ']' + maskedSrc.slice(this.tokenizer.rules.inline.blockSkip.lastIndex);
}
// Mask out escaped characters
while ((match = this.tokenizer.rules.inline.anyPunctuation.exec(maskedSrc)) != null) {
maskedSrc = maskedSrc.slice(0, match.index) + '++' + maskedSrc.slice(this.tokenizer.rules.inline.anyPunctuation.lastIndex);
}
while (src) {
if (!keepPrevChar) {
prevChar = '';
}
keepPrevChar = false;
// extensions
if (this.options.extensions
&& this.options.extensions.inline
&& this.options.extensions.inline.some((extTokenizer) => {
if (token = extTokenizer.call({ lexer: this }, src, tokens)) {
src = src.substring(token.raw.length);
tokens.push(token);
return true;
}
return false;
})) {
continue;
}
// escape
if (token = this.tokenizer.escape(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// tag
if (token = this.tokenizer.tag(src)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && token.type === 'text' && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
// link
if (token = this.tokenizer.link(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// reflink, nolink
if (token = this.tokenizer.reflink(src, this.tokens.links)) {
src = src.substring(token.raw.length);
lastToken = tokens[tokens.length - 1];
if (lastToken && token.type === 'text' && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
// em & strong
if (token = this.tokenizer.emStrong(src, maskedSrc, prevChar)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// code
if (token = this.tokenizer.codespan(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// br
if (token = this.tokenizer.br(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// del (gfm)
if (token = this.tokenizer.del(src)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// autolink
if (token = this.tokenizer.autolink(src, mangle)) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// url (gfm)
if (!this.state.inLink && (token = this.tokenizer.url(src, mangle))) {
src = src.substring(token.raw.length);
tokens.push(token);
continue;
}
// text
// prevent inlineText consuming extensions by clipping 'src' to extension start
cutSrc = src;
if (this.options.extensions && this.options.extensions.startInline) {
let startIndex = Infinity;
const tempSrc = src.slice(1);
let tempStart;
this.options.extensions.startInline.forEach((getStartIndex) => {
tempStart = getStartIndex.call({ lexer: this }, tempSrc);
if (typeof tempStart === 'number' && tempStart >= 0) { startIndex = Math.min(startIndex, tempStart); }
});
if (startIndex < Infinity && startIndex >= 0) {
cutSrc = src.substring(0, startIndex + 1);
}
}
if (token = this.tokenizer.inlineText(cutSrc, smartypants)) {
src = src.substring(token.raw.length);
if (token.raw.slice(-1) !== '_') { // Track prevChar before string of ____ started
prevChar = token.raw.slice(-1);
}
keepPrevChar = true;
lastToken = tokens[tokens.length - 1];
if (lastToken && lastToken.type === 'text') {
lastToken.raw += token.raw;
lastToken.text += token.text;
} else {
tokens.push(token);
}
continue;
}
if (src) {
const errMsg = 'Infinite loop on byte: ' + src.charCodeAt(0);
if (this.options.silent) {
console.error(errMsg);
break;
} else {
throw new Error(errMsg);
}
}
}
return tokens;
}
}
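The static lex entry point above can be exercised on its own; a short sketch (the token types in the comment are the typical result, not asserted):

```ts
import { _Lexer } from "./Lexer";

const tokens = _Lexer.lex("# Heading\n\n- item 1\n- item 2\n");
console.log(tokens.map(t => t.type)); // typically [ "heading", "list" ]
```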

src/utils/marked/src/MarkedOptions.ts
@@ -0,0 +1,217 @@
import type { Token, Tokens, TokensList } from './Tokens';
import type { _Parser } from './Parser';
import type { _Lexer } from './Lexer';
import type { _Renderer } from './Renderer';
import type { _Tokenizer } from './Tokenizer';
export interface SluggerOptions {
/** Generates the next unique slug without updating the internal accumulator. */
dryrun?: boolean;
}
export interface TokenizerThis {
lexer: _Lexer;
}
export type TokenizerExtensionFunction = (this: TokenizerThis, src: string, tokens: Token[] | TokensList) => Tokens.Generic | undefined;
export type TokenizerStartFunction = (this: TokenizerThis, src: string) => number | void;
export interface TokenizerExtension {
name: string;
level: 'block' | 'inline';
start?: TokenizerStartFunction | undefined;
tokenizer: TokenizerExtensionFunction;
childTokens?: string[] | undefined;
}
export interface RendererThis {
parser: _Parser;
}
export type RendererExtensionFunction = (this: RendererThis, token: Tokens.Generic) => string | false | undefined;
export interface RendererExtension {
name: string;
renderer: RendererExtensionFunction;
}
export type TokenizerAndRendererExtension = TokenizerExtension | RendererExtension | (TokenizerExtension & RendererExtension);
type RendererApi = Omit<_Renderer, 'constructor' | 'options'>;
type RendererObject = {
[K in keyof RendererApi]?: (...args: Parameters<RendererApi[K]>) => ReturnType<RendererApi[K]> | false
};
type TokenizerApi = Omit<_Tokenizer, 'constructor' | 'options' | 'rules' | 'lexer'>;
type TokenizerObject = {
[K in keyof TokenizerApi]?: (...args: Parameters<TokenizerApi[K]>) => ReturnType<TokenizerApi[K]> | false
};
export interface MarkedExtension {
/**
* True will tell marked to await any walkTokens functions before parsing the tokens and returning an HTML string.
*/
async?: boolean;
/**
* A prefix URL for any relative link.
* @deprecated Deprecated in v5.0.0 use marked-base-url to prefix url for any relative link.
*/
baseUrl?: string | undefined | null;
/**
* Enable GFM line breaks. This option requires the gfm option to be true.
*/
breaks?: boolean | undefined;
/**
* Add tokenizers and renderers to marked
*/
extensions?:
| TokenizerAndRendererExtension[]
| undefined | null;
/**
* Enable GitHub flavored markdown.
*/
gfm?: boolean | undefined;
/**
* Include an id attribute when emitting headings.
* @deprecated Deprecated in v5.0.0 use marked-gfm-heading-id to include an id attribute when emitting headings (h1, h2, h3, etc).
*/
headerIds?: boolean | undefined;
/**
* Set the prefix for header tag ids.
* @deprecated Deprecated in v5.0.0 use marked-gfm-heading-id to add a string to prefix the id attribute when emitting headings (h1, h2, h3, etc).
*/
headerPrefix?: string | undefined;
/**
* A function to highlight code blocks. The function can either be
* synchronous (returning a string) or asynchronous (callback invoked
* with an error if any occurred during highlighting and a string
* if highlighting was successful)
* @deprecated Deprecated in v5.0.0 use marked-highlight to add highlighting to code blocks.
*/
highlight?: ((code: string, lang: string | undefined, callback?: (error: Error, code?: string) => void) => string | void) | null;
/**
* Hooks are methods that hook into some part of marked.
* preprocess is called to process markdown before sending it to marked.
* postprocess is called to process html after marked has finished parsing.
*/
hooks?: {
preprocess: (markdown: string) => string | Promise<string>,
postprocess: (html: string) => string | Promise<string>,
// eslint-disable-next-line no-use-before-define
options?: MarkedOptions
} | null;
/**
* Set the prefix for code block classes.
* @deprecated Deprecated in v5.0.0 use marked-highlight to prefix the className in a <code> block. Useful for syntax highlighting.
*/
langPrefix?: string | undefined;
/**
* Mangle autolinks (<email@domain.com>).
* @deprecated Deprecated in v5.0.0 use marked-mangle to mangle email addresses.
*/
mangle?: boolean | undefined;
/**
* Conform to obscure parts of markdown.pl as much as possible. Don't fix any of the original markdown bugs or poor behavior.
*/
pedantic?: boolean | undefined;
/**
* Type: object Default: new Renderer()
*
* An object containing functions to render tokens to HTML.
*/
renderer?: RendererObject | undefined | null;
/**
* Sanitize the output. Ignore any HTML that has been input. If true, sanitize the HTML passed into markdownString with the sanitizer function.
* @deprecated Warning: This feature is deprecated and it should NOT be used as it cannot be considered secure. Instead use a sanitize library, like DOMPurify (recommended), sanitize-html or insane on the output HTML!
*/
sanitize?: boolean | undefined;
/**
* Optionally sanitize found HTML with a sanitizer function.
* @deprecated A function to sanitize the HTML passed into markdownString.
*/
sanitizer?: ((html: string) => string) | null;
/**
* Shows an HTML error message when rendering fails.
*/
silent?: boolean | undefined;
/**
* Use smarter list behavior than the original markdown. May eventually be default with the old behavior moved into pedantic.
*/
smartLists?: boolean | undefined;
/**
* Use "smart" typograhic punctuation for things like quotes and dashes.
* @deprecated Deprecated in v5.0.0 use marked-smartypants to use "smart" typographic punctuation for things like quotes and dashes.
*/
smartypants?: boolean | undefined;
/**
* The tokenizer defines how to turn markdown text into tokens.
*/
tokenizer?: TokenizerObject | undefined | null;
/**
* The walkTokens function gets called with every token.
* Child tokens are called before moving on to sibling tokens.
* Each token is passed by reference so updates are persisted when passed to the parser.
* The return value of the function is ignored.
*/
walkTokens?: ((token: Token) => void | unknown | Promise<void>) | undefined | null;
/**
* Generate closing slash for self-closing tags (<br/> instead of <br>)
* @deprecated Deprecated in v5.0.0 use marked-xhtml to emit self-closing HTML tags for void elements (<br/>, <img/>, etc.) with a "/" as required by XHTML.
*/
xhtml?: boolean | undefined;
}
export interface MarkedOptions extends Omit<MarkedExtension, 'renderer' | 'tokenizer' | 'extensions' | 'walkTokens'> {
/**
* Type: object Default: new Renderer()
*
* An object containing functions to render tokens to HTML.
*/
renderer?: _Renderer | undefined | null;
/**
* The tokenizer defines how to turn markdown text into tokens.
*/
tokenizer?: _Tokenizer | undefined | null;
/**
* Custom extensions
*/
extensions?: null | {
renderers: {
[name: string]: RendererExtensionFunction;
};
childTokens: {
[name: string]: string[];
};
inline?: TokenizerExtensionFunction[];
block?: TokenizerExtensionFunction[];
startInline?: TokenizerStartFunction[];
startBlock?: TokenizerStartFunction[];
};
/**
* walkTokens function returns array of values for Promise.all
*/
walkTokens?: null | ((token: Token) => void | (unknown | Promise<void>)[]);
}
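The TokenizerAndRendererExtension shape above is the hook point for custom syntax. A hypothetical ==mark== inline extension as a sketch against those types (the name, regex, and output tag are invented for illustration):

```ts
import type { MarkedExtension } from "./MarkedOptions";

const markExtension: MarkedExtension = {
  extensions: [{
    name: "mark",
    level: "inline",
    // Tell the lexer where a potential token may start.
    start: (src) => src.indexOf("=="),
    tokenizer(src) {
      const match = /^==([^=\n]+)==/.exec(src);
      if (match) {
        return { type: "mark", raw: match[0], text: match[1] };
      }
      return undefined;
    },
    renderer(token) {
      return `<mark>${token.text}</mark>`;
    }
  }]
};
```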

src/utils/marked/src/Parser.ts
@@ -0,0 +1,281 @@
import { _Renderer } from './Renderer';
import { _TextRenderer } from './TextRenderer';
import { _Slugger } from './Slugger';
import { _defaults } from './defaults';
import {
unescape
} from './helpers';
import type { Token, Tokens } from './Tokens';
import type { MarkedOptions } from './MarkedOptions';
/**
* Parsing & Compiling
*/
export class _Parser {
options: MarkedOptions;
renderer: _Renderer;
textRenderer: _TextRenderer;
slugger: _Slugger;
constructor(options?: MarkedOptions) {
this.options = options || _defaults;
this.options.renderer = this.options.renderer || new _Renderer();
this.renderer = this.options.renderer;
this.renderer.options = this.options;
this.textRenderer = new _TextRenderer();
this.slugger = new _Slugger();
}
/**
* Static Parse Method
*/
static parse(tokens: Token[], options?: MarkedOptions) {
const parser = new _Parser(options);
return parser.parse(tokens);
}
/**
* Static Parse Inline Method
*/
static parseInline(tokens: Token[], options?: MarkedOptions) {
const parser = new _Parser(options);
return parser.parseInline(tokens);
}
/**
* Parse Loop
*/
parse(tokens: Token[], top = true): string {
let out = '';
for (let i = 0; i < tokens.length; i++) {
const token = tokens[i];
// Run any renderer extensions
if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
const genericToken = token as Tokens.Generic;
const ret = this.options.extensions.renderers[genericToken.type].call({ parser: this }, genericToken);
if (ret !== false || !['space', 'hr', 'heading', 'code', 'table', 'blockquote', 'list', 'html', 'paragraph', 'text'].includes(genericToken.type)) {
out += ret || '';
continue;
}
}
switch (token.type) {
case 'space': {
continue;
}
case 'hr': {
out += this.renderer.hr();
continue;
}
case 'heading': {
const headingToken = token as Tokens.Heading;
out += this.renderer.heading(
this.parseInline(headingToken.tokens),
headingToken.depth,
unescape(this.parseInline(headingToken.tokens, this.textRenderer)),
this.slugger);
continue;
}
case 'code': {
const codeToken = token as Tokens.Code;
out += this.renderer.code(codeToken.text,
codeToken.lang,
!!codeToken.escaped);
continue;
}
case 'table': {
const tableToken = token as Tokens.Table;
let header = '';
// header
let cell = '';
for (let j = 0; j < tableToken.header.length; j++) {
cell += this.renderer.tablecell(
this.parseInline(tableToken.header[j].tokens),
{ header: true, align: tableToken.align[j] }
);
}
header += this.renderer.tablerow(cell);
let body = '';
for (let j = 0; j < tableToken.rows.length; j++) {
const row = tableToken.rows[j];
cell = '';
for (let k = 0; k < row.length; k++) {
cell += this.renderer.tablecell(
this.parseInline(row[k].tokens),
{ header: false, align: tableToken.align[k] }
);
}
body += this.renderer.tablerow(cell);
}
out += this.renderer.table(header, body);
continue;
}
case 'blockquote': {
const blockquoteToken = token as Tokens.Blockquote;
const body = this.parse(blockquoteToken.tokens);
out += this.renderer.blockquote(body);
continue;
}
case 'list': {
const listToken = token as Tokens.List;
const ordered = listToken.ordered;
const start = listToken.start;
const loose = listToken.loose;
let body = '';
for (let j = 0; j < listToken.items.length; j++) {
const item = listToken.items[j];
const checked = item.checked;
const task = item.task;
let itemBody = '';
if (item.task) {
const checkbox = this.renderer.checkbox(!!checked);
if (loose) {
if (item.tokens.length > 0 && item.tokens[0].type === 'paragraph') {
item.tokens[0].text = checkbox + ' ' + item.tokens[0].text;
if (item.tokens[0].tokens && item.tokens[0].tokens.length > 0 && item.tokens[0].tokens[0].type === 'text') {
item.tokens[0].tokens[0].text = checkbox + ' ' + item.tokens[0].tokens[0].text;
}
} else {
item.tokens.unshift({
type: 'text',
text: checkbox
} as Tokens.Text);
}
} else {
itemBody += checkbox;
}
}
itemBody += this.parse(item.tokens, loose);
body += this.renderer.listitem(itemBody, task, !!checked);
}
out += this.renderer.list(body, ordered, start);
continue;
}
case 'html': {
const htmlToken = token as Tokens.HTML;
out += this.renderer.html(htmlToken.text, htmlToken.block);
continue;
}
case 'paragraph': {
const paragraphToken = token as Tokens.Paragraph;
out += this.renderer.paragraph(this.parseInline(paragraphToken.tokens));
continue;
}
case 'text': {
let textToken = token as Tokens.Text;
let body = textToken.tokens ? this.parseInline(textToken.tokens) : textToken.text;
while (i + 1 < tokens.length && tokens[i + 1].type === 'text') {
textToken = tokens[++i] as Tokens.Text;
body += '\n' + (textToken.tokens ? this.parseInline(textToken.tokens) : textToken.text);
}
out += top ? this.renderer.paragraph(body) : body;
continue;
}
default: {
const errMsg = 'Token with "' + token.type + '" type was not found.';
if (this.options.silent) {
console.error(errMsg);
return '';
} else {
throw new Error(errMsg);
}
}
}
}
return out;
}
/**
* Parse Inline Tokens
*/
parseInline(tokens: Token[], renderer?: _Renderer | _TextRenderer): string {
renderer = renderer || this.renderer;
let out = '';
for (let i = 0; i < tokens.length; i++) {
const token = tokens[i];
// Run any renderer extensions
if (this.options.extensions && this.options.extensions.renderers && this.options.extensions.renderers[token.type]) {
const ret = this.options.extensions.renderers[token.type].call({ parser: this }, token);
if (ret !== false || !['escape', 'html', 'link', 'image', 'strong', 'em', 'codespan', 'br', 'del', 'text'].includes(token.type)) {
out += ret || '';
continue;
}
}
switch (token.type) {
case 'escape': {
const escapeToken = token as Tokens.Escape;
out += renderer.text(escapeToken.text);
break;
}
case 'html': {
const tagToken = token as Tokens.Tag;
out += renderer.html(tagToken.text);
break;
}
case 'link': {
const linkToken = token as Tokens.Link;
out += renderer.link(linkToken.href, linkToken.title, this.parseInline(linkToken.tokens, renderer));
break;
}
case 'image': {
const imageToken = token as Tokens.Image;
out += renderer.image(imageToken.href, imageToken.title, imageToken.text);
break;
}
case 'strong': {
const strongToken = token as Tokens.Strong;
out += renderer.strong(this.parseInline(strongToken.tokens, renderer));
break;
}
case 'em': {
const emToken = token as Tokens.Em;
out += renderer.em(this.parseInline(emToken.tokens, renderer));
break;
}
case 'codespan': {
const codespanToken = token as Tokens.Codespan;
out += renderer.codespan(codespanToken.text);
break;
}
case 'br': {
out += renderer.br();
break;
}
case 'del': {
const delToken = token as Tokens.Del;
out += renderer.del(this.parseInline(delToken.tokens, renderer));
break;
}
case 'text': {
const textToken = token as Tokens.Text;
out += renderer.text(textToken.text);
break;
}
default: {
const errMsg = 'Token with "' + token.type + '" type was not found.';
if (this.options.silent) {
console.error(errMsg);
return '';
} else {
throw new Error(errMsg);
}
}
}
}
return out;
}
}
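The lexer and parser above form the two-stage pipeline that the Marked class wires together; run standalone it looks like this sketch:

```ts
import { _Lexer } from "./Lexer";
import { _Parser } from "./Parser";

// Stage 1: markdown -> tokens. Stage 2: tokens -> HTML string.
const tokens = _Lexer.lex("**bold** and `code`");
const html = _Parser.parse(tokens);
```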

src/utils/marked/src/Renderer.ts
@@ -0,0 +1,169 @@
import { _defaults } from './defaults';
import {
cleanUrl,
escape
} from './helpers';
import type { MarkedOptions } from './MarkedOptions';
import type { _Slugger } from './Slugger';
/**
* Renderer
*/
export class _Renderer {
options: MarkedOptions;
constructor(options?: MarkedOptions) {
this.options = options || _defaults;
}
code(code: string, infostring: string | undefined, escaped: boolean): string {
const lang = (infostring || '').match(/^\S*/)?.[0];
if (this.options.highlight) {
const out = this.options.highlight(code, lang);
if (out != null && out !== code) {
escaped = true;
code = out;
}
}
code = code.replace(/\n$/, '') + '\n';
if (!lang) {
return '<pre><code>'
+ (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
return '<pre><code class="'
+ this.options.langPrefix
+ escape(lang)
+ '">'
+ (escaped ? code : escape(code, true))
+ '</code></pre>\n';
}
blockquote(quote: string): string {
return `<blockquote>\n${quote}</blockquote>\n`;
}
html(html: string, block?: boolean) : string {
return html;
}
heading(text: string, level: number, raw: string, slugger: _Slugger): string {
if (this.options.headerIds) {
const id = this.options.headerPrefix + slugger.slug(raw);
return `<h${level} id="${id}">${text}</h${level}>\n`;
}
// ignore IDs
return `<h${level}>${text}</h${level}>\n`;
}
hr(): string {
return this.options.xhtml ? '<hr/>\n' : '<hr>\n';
}
list(body: string, ordered: boolean, start: number | ''): string {
const type = ordered ? 'ol' : 'ul';
const startatt = (ordered && start !== 1) ? (' start="' + start + '"') : '';
return '<' + type + startatt + '>\n' + body + '</' + type + '>\n';
}
listitem(text: string, task: boolean, checked: boolean): string {
return `<li>${text}</li>\n`;
}
checkbox(checked: boolean): string {
return '<input '
+ (checked ? 'checked="" ' : '')
+ 'disabled="" type="checkbox"'
+ (this.options.xhtml ? ' /' : '')
+ '> ';
}
paragraph(text: string): string {
return `<p>${text}</p>\n`;
}
table(header: string, body: string): string {
if (body) body = `<tbody>${body}</tbody>`;
return '<table>\n'
+ '<thead>\n'
+ header
+ '</thead>\n'
+ body
+ '</table>\n';
}
tablerow(content: string): string {
return `<tr>\n${content}</tr>\n`;
}
tablecell(content: string, flags: {
header: boolean;
align: 'center' | 'left' | 'right' | null;
}): string {
const type = flags.header ? 'th' : 'td';
const tag = flags.align
? `<${type} align="${flags.align}">`
: `<${type}>`;
return tag + content + `</${type}>\n`;
}
/**
* span level renderer
*/
strong(text: string): string {
return `<strong>${text}</strong>`;
}
em(text: string): string {
return `<em>${text}</em>`;
}
codespan(text: string): string {
return `<code>${text}</code>`;
}
br(): string {
return this.options.xhtml ? '<br/>' : '<br>';
}
del(text: string): string {
return `<del>${text}</del>`;
}
link(href: string, title: string | null | undefined, text: string): string {
const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
if (cleanHref === null) {
return text;
}
href = cleanHref;
let out = '<a href="' + href + '"';
if (title) {
out += ' title="' + title + '"';
}
out += '>' + text + '</a>';
return out;
}
image(href: string, title: string | null, text: string): string {
const cleanHref = cleanUrl(this.options.sanitize, this.options.baseUrl, href);
if (cleanHref === null) {
return text;
}
href = cleanHref;
let out = `<img src="${href}" alt="${text}"`;
if (title) {
out += ` title="${title}"`;
}
out += this.options.xhtml ? '/>' : '>';
return out;
}
text(text: string) : string {
return text;
}
}
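A common way to customize output is to subclass the renderer above and pass the instance through MarkedOptions.renderer; a sketch that opens links in a new tab (the attribute handling is illustrative):

```ts
import { _Renderer } from "./Renderer";

class BlankTargetRenderer extends _Renderer {
  link(href: string, title: string | null | undefined, text: string): string {
    const out = super.link(href, title, text);
    // Inject target/rel into the anchor produced by the base renderer.
    return out.replace(/^<a /, '<a target="_blank" rel="noopener" ');
  }
}
```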

src/utils/marked/src/Slugger.ts
@@ -0,0 +1,51 @@
import type { SluggerOptions } from './MarkedOptions';
/**
* Slugger generates header id
*/
export class _Slugger {
seen: { [slugValue: string]: number };
constructor() {
this.seen = {};
}
serialize(value: string) {
return value
.toLowerCase()
.trim()
// remove html tags
.replace(/<[!\/a-z].*?>/ig, '')
// remove unwanted chars
.replace(/[\u2000-\u206F\u2E00-\u2E7F\\'!"#$%&()*+,./:;<=>?@[\]^`{|}~]/g, '')
.replace(/\s/g, '-');
}
/**
* Finds the next safe (unique) slug to use
*/
getNextSafeSlug(originalSlug: string, isDryRun: boolean | undefined) {
let slug = originalSlug;
let occurenceAccumulator = 0;
if (this.seen.hasOwnProperty(slug)) {
occurenceAccumulator = this.seen[originalSlug];
do {
occurenceAccumulator++;
slug = originalSlug + '-' + occurenceAccumulator;
} while (this.seen.hasOwnProperty(slug));
}
if (!isDryRun) {
this.seen[originalSlug] = occurenceAccumulator;
this.seen[slug] = 0;
}
return slug;
}
/**
* Convert string to unique id
*/
slug(value: string, options: SluggerOptions = {}) {
const slug = this.serialize(value);
return this.getNextSafeSlug(slug, options.dryrun);
}
}
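Behaviour sketch for the slugger above: repeated headings get numeric suffixes, and dryrun peeks at the next slug without recording it in the internal seen map.

```ts
import { _Slugger } from "./Slugger";

const slugger = new _Slugger();
slugger.slug("My Heading");                   // "my-heading"
slugger.slug("My Heading");                   // "my-heading-1"
slugger.slug("My Heading", { dryrun: true }); // "my-heading-2" (not recorded)
```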

src/utils/marked/src/TextRenderer.ts
@@ -0,0 +1,42 @@
/**
* TextRenderer
* returns only the textual part of the token
*/
export class _TextRenderer {
// no need for block level renderers
strong(text: string) {
return text;
}
em(text: string) {
return text;
}
codespan(text: string) {
return text;
}
del(text: string) {
return text;
}
html(text: string) {
return text;
}
text(text: string) {
return text;
}
link(href: string, title: string | null | undefined, text: string) {
return '' + text;
}
image(href: string, title: string | null, text: string) {
return '' + text;
}
br() {
return '';
}
}
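The parser pairs _TextRenderer with parseInline to recover plain text (this is how the raw argument for renderer.heading and the slugger is produced); the same combination strips inline markup from any string, as in this sketch:

```ts
import { _Lexer } from "./Lexer";
import { _Parser } from "./Parser";
import { _TextRenderer } from "./TextRenderer";

const tokens = _Lexer.lexInline("**bold** [link](https://example.com)");
const plain = new _Parser().parseInline(tokens, new _TextRenderer());
// plain === "bold link"
```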

src/utils/marked/src/Tokenizer.ts
@@ -0,0 +1,813 @@
import { _defaults } from './defaults';
import {
rtrim,
splitCells,
escape,
findClosingBracket
} from './helpers';
import type { _Lexer } from './Lexer';
import type { Links, Tokens } from './Tokens';
import type { MarkedOptions } from './MarkedOptions';
function outputLink(cap: string[], link: Pick<Tokens.Link, 'href' | 'title'>, raw: string, lexer: _Lexer): Tokens.Link | Tokens.Image {
const href = link.href;
const title = link.title ? escape(link.title) : null;
const text = cap[1].replace(/\\([\[\]])/g, '$1');
if (cap[0].charAt(0) !== '!') {
lexer.state.inLink = true;
const token: Tokens.Link = {
type: 'link',
raw,
href,
title,
text,
tokens: lexer.inlineTokens(text)
};
lexer.state.inLink = false;
return token;
}
return {
type: 'image',
raw,
href,
title,
text: escape(text)
};
}
function indentCodeCompensation(raw: string, text: string) {
const matchIndentToCode = raw.match(/^(\s+)(?:```)/);
if (matchIndentToCode === null) {
return text;
}
const indentToCode = matchIndentToCode[1];
return text
.split('\n')
.map(node => {
const matchIndentInNode = node.match(/^\s+/);
if (matchIndentInNode === null) {
return node;
}
const [indentInNode] = matchIndentInNode;
if (indentInNode.length >= indentToCode.length) {
return node.slice(indentToCode.length);
}
return node;
})
.join('\n');
}
/**
* Tokenizer
*/
export class _Tokenizer {
options: MarkedOptions;
// TODO: Fix this rules type
rules: any;
lexer!: _Lexer;
constructor(options?: MarkedOptions) {
this.options = options || _defaults;
}
space(src: string): Tokens.Space | undefined {
const cap = this.rules.block.newline.exec(src);
if (cap && cap[0].length > 0) {
return {
type: 'space',
raw: cap[0]
};
}
}
code(src: string): Tokens.Code | undefined {
const cap = this.rules.block.code.exec(src);
if (cap) {
const text = cap[0].replace(/^ {1,4}/gm, '');
return {
type: 'code',
raw: cap[0],
codeBlockStyle: 'indented',
text: !this.options.pedantic
? rtrim(text, '\n')
: text
};
}
}
fences(src: string): Tokens.Code | undefined {
const cap = this.rules.block.fences.exec(src);
if (cap) {
const raw = cap[0];
const text = indentCodeCompensation(raw, cap[3] || '');
return {
type: 'code',
raw,
lang: cap[2] ? cap[2].trim().replace(this.rules.inline._escapes, '$1') : cap[2],
text
};
}
}
heading(src: string): Tokens.Heading | undefined {
const cap = this.rules.block.heading.exec(src);
if (cap) {
let text = cap[2].trim();
// remove trailing #s
if (/#$/.test(text)) {
const trimmed = rtrim(text, '#');
if (this.options.pedantic) {
text = trimmed.trim();
} else if (!trimmed || / $/.test(trimmed)) {
// CommonMark requires space before trailing #s
text = trimmed.trim();
}
}
return {
type: 'heading',
raw: cap[0],
depth: cap[1].length,
text,
tokens: this.lexer.inline(text)
};
}
}
hr(src: string): Tokens.Hr | undefined {
const cap = this.rules.block.hr.exec(src);
if (cap) {
return {
type: 'hr',
raw: cap[0]
};
}
}
blockquote(src: string): Tokens.Blockquote | undefined {
const cap = this.rules.block.blockquote.exec(src);
if (cap) {
const text = cap[0].replace(/^ *>[ \t]?/gm, '');
const top = this.lexer.state.top;
this.lexer.state.top = true;
const tokens = this.lexer.blockTokens(text);
this.lexer.state.top = top;
return {
type: 'blockquote',
raw: cap[0],
tokens,
text
};
}
}
list(src: string): Tokens.List | undefined {
let cap = this.rules.block.list.exec(src);
if (cap) {
let bull = cap[1].trim();
const isordered = bull.length > 1;
const list: Tokens.List = {
type: 'list',
raw: '',
ordered: isordered,
start: isordered ? +bull.slice(0, -1) : '',
loose: false,
items: [] as Tokens.ListItem[]
};
bull = isordered ? `\\d{1,9}\\${bull.slice(-1)}` : `\\${bull}`;
if (this.options.pedantic) {
bull = isordered ? bull : '[*+-]';
}
// Get next list item
const itemRegex = new RegExp(`^( {0,3}${bull})((?:[\t ][^\\n]*)?(?:\\n|$))`);
let raw = '';
let itemContents = '';
let endsWithBlankLine = false;
// Check if current bullet point can start a new List Item
while (src) {
let endEarly = false;
if (!(cap = itemRegex.exec(src))) {
break;
}
if (this.rules.block.hr.test(src)) { // End list if bullet was actually HR (possibly move into itemRegex?)
break;
}
raw = cap[0] as string;
src = src.substring(raw.length);
let line = cap[2].split('\n', 1)[0].replace(/^\t+/, (t: string) => ' '.repeat(3 * t.length)) as string;
let nextLine = src.split('\n', 1)[0];
let indent = 0;
if (this.options.pedantic) {
indent = 2;
itemContents = line.trimLeft();
} else {
indent = cap[2].search(/[^ ]/); // Find first non-space char
indent = indent > 4 ? 1 : indent; // Treat indented code blocks (> 4 spaces) as having only 1 indent
itemContents = line.slice(indent);
indent += cap[1].length;
}
let blankLine = false;
if (!line && /^ *$/.test(nextLine)) { // Items begin with at most one blank line
raw += nextLine + '\n';
src = src.substring(nextLine.length + 1);
endEarly = true;
}
if (!endEarly) {
const nextBulletRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:[*+-]|\\d{1,9}[.)])((?:[ \t][^\\n]*)?(?:\\n|$))`);
const hrRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}((?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$)`);
const fencesBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}(?:\`\`\`|~~~)`);
const headingBeginRegex = new RegExp(`^ {0,${Math.min(3, indent - 1)}}#`);
// Check if following lines should be included in List Item
while (src) {
const rawLine = src.split('\n', 1)[0];
nextLine = rawLine;
// Re-align to follow commonmark nesting rules
if (this.options.pedantic) {
nextLine = nextLine.replace(/^ {1,4}(?=( {4})*[^ ])/g, ' ');
}
// End list item if found code fences
if (fencesBeginRegex.test(nextLine)) {
break;
}
// End list item if found start of new heading
if (headingBeginRegex.test(nextLine)) {
break;
}
// End list item if found start of new bullet
if (nextBulletRegex.test(nextLine)) {
break;
}
// Horizontal rule found
if (hrRegex.test(src)) {
break;
}
if (nextLine.search(/[^ ]/) >= indent || !nextLine.trim()) { // Dedent if possible
itemContents += '\n' + nextLine.slice(indent);
} else {
// not enough indentation
if (blankLine) {
break;
}
// paragraph continuation unless last line was a different block level element
if (line.search(/[^ ]/) >= 4) { // indented code block
break;
}
if (fencesBeginRegex.test(line)) {
break;
}
if (headingBeginRegex.test(line)) {
break;
}
if (hrRegex.test(line)) {
break;
}
itemContents += '\n' + nextLine;
}
if (!blankLine && !nextLine.trim()) { // Check if current line is blank
blankLine = true;
}
raw += rawLine + '\n';
src = src.substring(rawLine.length + 1);
line = nextLine.slice(indent);
}
}
if (!list.loose) {
// If the previous item ended with a blank line, the list is loose
if (endsWithBlankLine) {
list.loose = true;
} else if (/\n *\n *$/.test(raw)) {
endsWithBlankLine = true;
}
}
let istask: RegExpExecArray | null = null;
let ischecked: boolean | undefined;
// Check for task list items
if (this.options.gfm) {
istask = /^\[[ xX]\] /.exec(itemContents);
if (istask) {
ischecked = istask[0] !== '[ ] ';
itemContents = itemContents.replace(/^\[[ xX]\] +/, '');
}
}
list.items.push({
type: 'list_item',
raw,
task: !!istask,
checked: ischecked,
loose: false,
text: itemContents,
tokens: []
});
list.raw += raw;
}
// Do not consume newlines at end of final item. Alternatively, make itemRegex *start* with any newlines to simplify/speed up endsWithBlankLine logic
list.items[list.items.length - 1].raw = raw.trimRight();
(list.items[list.items.length - 1] as Tokens.ListItem).text = itemContents.trimRight();
list.raw = list.raw.trimRight();
// Item child tokens handled here at end because we needed to have the final item to trim it first
for (let i = 0; i < list.items.length; i++) {
this.lexer.state.top = false;
list.items[i].tokens = this.lexer.blockTokens(list.items[i].text, []);
if (!list.loose) {
// Check if list should be loose
const spacers = list.items[i].tokens.filter(t => t.type === 'space');
const hasMultipleLineBreaks = spacers.length > 0 && spacers.some(t => /\n.*\n/.test(t.raw));
list.loose = hasMultipleLineBreaks;
}
}
// Set all items to loose if list is loose
if (list.loose) {
for (let i = 0; i < list.items.length; i++) {
list.items[i].loose = true;
}
}
return list;
}
}
html(src: string): Tokens.HTML | Tokens.Paragraph | undefined {
const cap = this.rules.block.html.exec(src);
if (cap) {
const token: Tokens.HTML | Tokens.Paragraph = {
type: 'html',
block: true,
raw: cap[0],
pre: !this.options.sanitizer
&& (cap[1] === 'pre' || cap[1] === 'script' || cap[1] === 'style'),
text: cap[0]
};
if (this.options.sanitize) {
const text = this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0]);
const paragraph = token as unknown as Tokens.Paragraph;
paragraph.type = 'paragraph';
paragraph.text = text;
paragraph.tokens = this.lexer.inline(text);
}
return token;
}
}
def(src: string): Tokens.Def | undefined {
const cap = this.rules.block.def.exec(src);
if (cap) {
const tag = cap[1].toLowerCase().replace(/\s+/g, ' ');
const href = cap[2] ? cap[2].replace(/^<(.*)>$/, '$1').replace(this.rules.inline._escapes, '$1') : '';
const title = cap[3] ? cap[3].substring(1, cap[3].length - 1).replace(this.rules.inline._escapes, '$1') : cap[3];
return {
type: 'def',
tag,
raw: cap[0],
href,
title
};
}
}
table(src: string): Tokens.Table | undefined {
const cap = this.rules.block.table.exec(src);
if (cap) {
const item: Tokens.Table = {
type: 'table',
raw: cap[0],
header: splitCells(cap[1]).map(c => {
return { text: c, tokens: [] };
}),
align: cap[2].replace(/^ *|\| *$/g, '').split(/ *\| */),
rows: cap[3] && cap[3].trim() ? cap[3].replace(/\n[ \t]*$/, '').split('\n') : []
};
if (item.header.length === item.align.length) {
let l = item.align.length;
let i, j, k, row;
for (i = 0; i < l; i++) {
const align = item.align[i];
if (align) {
if (/^ *-+: *$/.test(align)) {
item.align[i] = 'right';
} else if (/^ *:-+: *$/.test(align)) {
item.align[i] = 'center';
} else if (/^ *:-+ *$/.test(align)) {
item.align[i] = 'left';
} else {
item.align[i] = null;
}
}
}
l = item.rows.length;
for (i = 0; i < l; i++) {
item.rows[i] = splitCells(item.rows[i] as unknown as string, item.header.length).map(c => {
return { text: c, tokens: [] };
});
}
// parse child tokens inside headers and cells
// header child tokens
l = item.header.length;
for (j = 0; j < l; j++) {
item.header[j].tokens = this.lexer.inline(item.header[j].text);
}
// cell child tokens
l = item.rows.length;
for (j = 0; j < l; j++) {
row = item.rows[j];
for (k = 0; k < row.length; k++) {
row[k].tokens = this.lexer.inline(row[k].text);
}
}
return item;
}
}
}
lheading(src: string): Tokens.Heading | undefined {
const cap = this.rules.block.lheading.exec(src);
if (cap) {
return {
type: 'heading',
raw: cap[0],
depth: cap[2].charAt(0) === '=' ? 1 : 2,
text: cap[1],
tokens: this.lexer.inline(cap[1])
};
}
}
paragraph(src: string): Tokens.Paragraph | undefined {
const cap = this.rules.block.paragraph.exec(src);
if (cap) {
const text = cap[1].charAt(cap[1].length - 1) === '\n'
? cap[1].slice(0, -1)
: cap[1];
return {
type: 'paragraph',
raw: cap[0],
text,
tokens: this.lexer.inline(text)
};
}
}
text(src: string): Tokens.Text | undefined {
const cap = this.rules.block.text.exec(src);
if (cap) {
return {
type: 'text',
raw: cap[0],
text: cap[0],
tokens: this.lexer.inline(cap[0])
};
}
}
escape(src: string): Tokens.Escape | undefined {
const cap = this.rules.inline.escape.exec(src);
if (cap) {
return {
type: 'escape',
raw: cap[0],
text: escape(cap[1])
};
}
}
tag(src: string): Tokens.Tag | undefined {
const cap = this.rules.inline.tag.exec(src);
if (cap) {
if (!this.lexer.state.inLink && /^<a /i.test(cap[0])) {
this.lexer.state.inLink = true;
} else if (this.lexer.state.inLink && /^<\/a>/i.test(cap[0])) {
this.lexer.state.inLink = false;
}
if (!this.lexer.state.inRawBlock && /^<(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
this.lexer.state.inRawBlock = true;
} else if (this.lexer.state.inRawBlock && /^<\/(pre|code|kbd|script)(\s|>)/i.test(cap[0])) {
this.lexer.state.inRawBlock = false;
}
return {
type: this.options.sanitize
? 'text'
: 'html',
raw: cap[0],
inLink: this.lexer.state.inLink,
inRawBlock: this.lexer.state.inRawBlock,
block: false,
text: this.options.sanitize
? (this.options.sanitizer
? this.options.sanitizer(cap[0])
: escape(cap[0]))
: cap[0]
};
}
}
link(src: string): Tokens.Link | Tokens.Image | undefined {
const cap = this.rules.inline.link.exec(src);
if (cap) {
const trimmedUrl = cap[2].trim();
if (!this.options.pedantic && /^</.test(trimmedUrl)) {
// commonmark requires matching angle brackets
if (!(/>$/.test(trimmedUrl))) {
return;
}
// ending angle bracket cannot be escaped
const rtrimSlash = rtrim(trimmedUrl.slice(0, -1), '\\');
if ((trimmedUrl.length - rtrimSlash.length) % 2 === 0) {
return;
}
} else {
// find closing parenthesis
const lastParenIndex = findClosingBracket(cap[2], '()');
if (lastParenIndex > -1) {
const start = cap[0].indexOf('!') === 0 ? 5 : 4;
const linkLen = start + cap[1].length + lastParenIndex;
cap[2] = cap[2].substring(0, lastParenIndex);
cap[0] = cap[0].substring(0, linkLen).trim();
cap[3] = '';
}
}
let href = cap[2];
let title = '';
if (this.options.pedantic) {
// split pedantic href and title
const link = /^([^'"]*[^\s])\s+(['"])(.*)\2/.exec(href);
if (link) {
href = link[1];
title = link[3];
}
} else {
title = cap[3] ? cap[3].slice(1, -1) : '';
}
href = href.trim();
if (/^</.test(href)) {
if (this.options.pedantic && !(/>$/.test(trimmedUrl))) {
// pedantic allows starting angle bracket without ending angle bracket
href = href.slice(1);
} else {
href = href.slice(1, -1);
}
}
return outputLink(cap, {
href: href ? href.replace(this.rules.inline._escapes, '$1') : href,
title: title ? title.replace(this.rules.inline._escapes, '$1') : title
}, cap[0], this.lexer);
}
}
reflink(src: string, links: Links): Tokens.Link | Tokens.Image | Tokens.Text | undefined {
let cap;
if ((cap = this.rules.inline.reflink.exec(src))
|| (cap = this.rules.inline.nolink.exec(src))) {
let link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
link = links[link.toLowerCase()];
if (!link) {
const text = cap[0].charAt(0);
return {
type: 'text',
raw: text,
text
};
}
return outputLink(cap, link, cap[0], this.lexer);
}
}
emStrong(src: string, maskedSrc: string, prevChar = ''): Tokens.Em | Tokens.Strong | undefined {
let match = this.rules.inline.emStrong.lDelim.exec(src);
if (!match) return;
// _ can't be between two alphanumerics. \p{L}\p{N} includes non-English alphabets/numbers as well
if (match[3] && prevChar.match(/[\p{L}\p{N}]/u)) return;
const nextChar = match[1] || match[2] || '';
if (!nextChar || !prevChar || this.rules.inline.punctuation.exec(prevChar)) {
// unicode Regex counts emoji as 1 char; spread into array for proper count (used multiple times below)
const lLength = [...match[0]].length - 1;
let rDelim, rLength, delimTotal = lLength, midDelimTotal = 0;
const endReg = match[0][0] === '*' ? this.rules.inline.emStrong.rDelimAst : this.rules.inline.emStrong.rDelimUnd;
endReg.lastIndex = 0;
// Clip maskedSrc to same section of string as src (move to lexer?)
maskedSrc = maskedSrc.slice(-1 * src.length + lLength);
while ((match = endReg.exec(maskedSrc)) != null) {
rDelim = match[1] || match[2] || match[3] || match[4] || match[5] || match[6];
if (!rDelim) continue; // skip single * in __abc*abc__
rLength = [...rDelim].length;
if (match[3] || match[4]) { // found another Left Delim
delimTotal += rLength;
continue;
} else if (match[5] || match[6]) { // either Left or Right Delim
if (lLength % 3 && !((lLength + rLength) % 3)) {
midDelimTotal += rLength;
continue; // CommonMark Emphasis Rules 9-10
}
}
delimTotal -= rLength;
if (delimTotal > 0) continue; // Haven't found enough closing delimiters
// Remove extra characters. *a*** -> *a*
rLength = Math.min(rLength, rLength + delimTotal + midDelimTotal);
const raw = [...src].slice(0, lLength + match.index + rLength + 1).join('');
// Create `em` if smallest delimiter has odd char count. *a***
if (Math.min(lLength, rLength) % 2) {
const text = raw.slice(1, -1);
return {
type: 'em',
raw,
text,
tokens: this.lexer.inlineTokens(text)
};
}
// Create 'strong' if smallest delimiter has even char count. **a***
const text = raw.slice(2, -2);
return {
type: 'strong',
raw,
text,
tokens: this.lexer.inlineTokens(text)
};
}
}
}
codespan(src: string): Tokens.Codespan | undefined {
const cap = this.rules.inline.code.exec(src);
if (cap) {
let text = cap[2].replace(/\n/g, ' ');
const hasNonSpaceChars = /[^ ]/.test(text);
const hasSpaceCharsOnBothEnds = /^ /.test(text) && / $/.test(text);
if (hasNonSpaceChars && hasSpaceCharsOnBothEnds) {
text = text.substring(1, text.length - 1);
}
text = escape(text, true);
return {
type: 'codespan',
raw: cap[0],
text
};
}
}
br(src: string): Tokens.Br | undefined {
const cap = this.rules.inline.br.exec(src);
if (cap) {
return {
type: 'br',
raw: cap[0]
};
}
}
del(src: string): Tokens.Del | undefined {
const cap = this.rules.inline.del.exec(src);
if (cap) {
return {
type: 'del',
raw: cap[0],
text: cap[2],
tokens: this.lexer.inlineTokens(cap[2])
};
}
}
autolink(src: string, mangle: (cap: string) => string): Tokens.Link | undefined {
const cap = this.rules.inline.autolink.exec(src);
if (cap) {
let text, href;
if (cap[2] === '@') {
text = escape(this.options.mangle ? mangle(cap[1]) : cap[1]);
href = 'mailto:' + text;
} else {
text = escape(cap[1]);
href = text;
}
return {
type: 'link',
raw: cap[0],
text,
href,
tokens: [
{
type: 'text',
raw: text,
text
}
]
};
}
}
url(src: string, mangle: (cap: string) => string): Tokens.Link | undefined {
let cap;
if (cap = this.rules.inline.url.exec(src)) {
let text, href;
if (cap[2] === '@') {
text = escape(this.options.mangle ? mangle(cap[0]) : cap[0]);
href = 'mailto:' + text;
} else {
// do extended autolink path validation
let prevCapZero;
do {
prevCapZero = cap[0];
cap[0] = this.rules.inline._backpedal.exec(cap[0])[0];
} while (prevCapZero !== cap[0]);
text = escape(cap[0]);
if (cap[1] === 'www.') {
href = 'http://' + cap[0];
} else {
href = cap[0];
}
}
return {
type: 'link',
raw: cap[0],
text,
href,
tokens: [
{
type: 'text',
raw: text,
text
}
]
};
}
}
inlineText(src: string, smartypants: (cap: string) => string): Tokens.Text | undefined {
const cap = this.rules.inline.text.exec(src);
if (cap) {
let text;
if (this.lexer.state.inRawBlock) {
text = this.options.sanitize ? (this.options.sanitizer ? this.options.sanitizer(cap[0]) : escape(cap[0])) : cap[0];
} else {
text = escape(this.options.smartypants ? smartypants(cap[0]) : cap[0]);
}
return {
type: 'text',
raw: cap[0],
text
};
}
}
}
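
For reference, a minimal usage sketch of how these tokenizer branches surface once the lexer drives them. This is illustrative and not part of the commit; the import path assumes the vendored layout under src/utils/marked/.

import { lexer } from './src/marked';

// '**bold** and `code`' lexes to a paragraph token whose child tokens
// include (roughly) { type: 'strong', raw: '**bold**', text: 'bold', tokens: [...] }
// and { type: 'codespan', raw: '`code`', text: 'code' }.
const tokens = lexer('**bold** and `code`');
console.log(JSON.stringify(tokens, null, 2));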

@ -0,0 +1,201 @@
/* eslint-disable no-use-before-define */
export type Token = (
Tokens.Space
| Tokens.Code
| Tokens.Heading
| Tokens.Table
| Tokens.Hr
| Tokens.Blockquote
| Tokens.List
| Tokens.ListItem
| Tokens.Paragraph
| Tokens.HTML
| Tokens.Text
| Tokens.Def
| Tokens.Escape
| Tokens.Tag
| Tokens.Image
| Tokens.Link
| Tokens.Strong
| Tokens.Em
| Tokens.Codespan
| Tokens.Br
| Tokens.Del
| Tokens.Generic);
export namespace Tokens {
export interface Space {
type: 'space';
raw: string;
}
export interface Code {
type: 'code';
raw: string;
codeBlockStyle?: 'indented' | undefined;
lang?: string | undefined;
text: string;
escaped?: boolean;
}
export interface Heading {
type: 'heading';
raw: string;
depth: number;
text: string;
tokens: Token[];
}
export interface Table {
type: 'table';
raw: string;
align: Array<'center' | 'left' | 'right' | null>;
header: TableCell[];
rows: TableCell[][];
}
export interface TableCell {
text: string;
tokens: Token[];
}
export interface Hr {
type: 'hr';
raw: string;
}
export interface Blockquote {
type: 'blockquote';
raw: string;
text: string;
tokens: Token[];
}
export interface List {
type: 'list';
raw: string;
ordered: boolean;
start: number | '';
loose: boolean;
items: ListItem[];
}
export interface ListItem {
type: 'list_item';
raw: string;
task: boolean;
checked?: boolean | undefined;
loose: boolean;
text: string;
tokens: Token[];
}
export interface Paragraph {
type: 'paragraph';
raw: string;
pre?: boolean | undefined;
text: string;
tokens: Token[];
}
export interface HTML {
type: 'html';
raw: string;
pre: boolean;
text: string;
block: boolean;
}
export interface Text {
type: 'text';
raw: string;
text: string;
tokens?: Token[];
}
export interface Def {
type: 'def';
raw: string;
tag: string;
href: string;
title: string;
}
export interface Escape {
type: 'escape';
raw: string;
text: string;
}
export interface Tag {
type: 'text' | 'html';
raw: string;
inLink: boolean;
inRawBlock: boolean;
text: string;
block: boolean;
}
export interface Link {
type: 'link';
raw: string;
href: string;
title?: string | null;
text: string;
tokens: Token[];
}
export interface Image {
type: 'image';
raw: string;
href: string;
title: string | null;
text: string;
}
export interface Strong {
type: 'strong';
raw: string;
text: string;
tokens: Token[];
}
export interface Em {
type: 'em';
raw: string;
text: string;
tokens: Token[];
}
export interface Codespan {
type: 'codespan';
raw: string;
text: string;
}
export interface Br {
type: 'br';
raw: string;
}
export interface Del {
type: 'del';
raw: string;
text: string;
tokens: Token[];
}
export interface Generic {
[index: string]: any;
type: string;
raw: string;
tokens?: Token[] | undefined;
}
}
export type Links = Record<string, Pick<Tokens.Link | Tokens.Image, 'href' | 'title'>>;
export type TokensList = Token[] & {
links: Links;
};
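
A small sketch of how these types compose: a hand-built heading token and the TokensList shape, which is a plain token array carrying the map of reference-link definitions. Illustrative only; the import path is assumed.

import type { Token, Tokens, Links, TokensList } from './src/Tokens';

const heading: Tokens.Heading = {
  type: 'heading',
  raw: '# Title\n',
  depth: 1,
  text: 'Title',
  tokens: [{ type: 'text', raw: 'Title', text: 'Title' }]
};

// TokensList = Token[] plus a `links` record of reference-style definitions.
const list: TokensList = Object.assign([heading] as Token[], { links: {} as Links });
console.log(list.length, Object.keys(list.links).length); // 1 0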

@ -0,0 +1,35 @@
import type { MarkedOptions } from './MarkedOptions';
/**
* Gets the original marked default options.
*/
export function _getDefaults(): MarkedOptions {
return {
async: false,
baseUrl: null,
breaks: false,
extensions: null,
gfm: true,
headerIds: false,
headerPrefix: '',
highlight: null,
hooks: null,
langPrefix: 'language-',
mangle: false,
pedantic: false,
renderer: null,
sanitize: false,
sanitizer: null,
silent: false,
smartypants: false,
tokenizer: null,
walkTokens: null,
xhtml: false
};
}
export let _defaults = _getDefaults();
export function changeDefaults(newDefaults: MarkedOptions) {
_defaults = newDefaults;
}
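
A sketch of how these two helpers work together (illustrative; the import path is assumed): take a fresh copy of the defaults, adjust it, and install it as the shared options used when no per-call options are given.

import { _getDefaults, changeDefaults } from './src/defaults';

const opts = _getDefaults();
opts.breaks = true;   // render single newlines as <br>
opts.silent = true;   // also skips the deprecation warnings in checkDeprecations
changeDefaults(opts);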

@ -0,0 +1,265 @@
import type { MarkedOptions } from './MarkedOptions';
import type { ResultCallback } from './Instance';
import type { Rule } from './rules';
/**
* Helpers
*/
const escapeTest = /[&<>"']/;
const escapeReplace = new RegExp(escapeTest.source, 'g');
const escapeTestNoEncode = /[<>"']|&(?!(#\d{1,7}|#[Xx][a-fA-F0-9]{1,6}|\w+);)/;
const escapeReplaceNoEncode = new RegExp(escapeTestNoEncode.source, 'g');
const escapeReplacements: {[index: string]: string} = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#39;'
};
const getEscapeReplacement = (ch: string) => escapeReplacements[ch];
export function escape(html: string, encode?: boolean) {
if (encode) {
if (escapeTest.test(html)) {
return html.replace(escapeReplace, getEscapeReplacement);
}
} else {
if (escapeTestNoEncode.test(html)) {
return html.replace(escapeReplaceNoEncode, getEscapeReplacement);
}
}
return html;
}
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig;
export function unescape(html: string) {
// explicitly match decimal, hex, and named HTML entities
return html.replace(unescapeTest, (_, n) => {
n = n.toLowerCase();
if (n === 'colon') return ':';
if (n.charAt(0) === '#') {
return n.charAt(1) === 'x'
? String.fromCharCode(parseInt(n.substring(2), 16))
: String.fromCharCode(+n.substring(1));
}
return '';
});
}
const caret = /(^|[^\[])\^/g;
export function edit(regex: Rule, opt?: string) {
regex = typeof regex === 'string' ? regex : regex.source;
opt = opt || '';
const obj = {
replace: (name: string | RegExp, val: string | RegExp) => {
val = typeof val === 'object' && 'source' in val ? val.source : val;
val = val.replace(caret, '$1');
regex = (regex as string).replace(name, val);
return obj;
},
getRegex: () => {
return new RegExp(regex, opt);
}
};
return obj;
}
const nonWordAndColonTest = /[^\w:]/g;
const originIndependentUrl = /^$|^[a-z][a-z0-9+.-]*:|^[?#]/i;
export function cleanUrl(sanitize: boolean | undefined, base: string | undefined | null, href: string) {
if (sanitize) {
let prot;
try {
prot = decodeURIComponent(unescape(href))
.replace(nonWordAndColonTest, '')
.toLowerCase();
} catch (e) {
return null;
}
if (prot.indexOf('javascript:') === 0 || prot.indexOf('vbscript:') === 0 || prot.indexOf('data:') === 0) {
return null;
}
}
if (base && !originIndependentUrl.test(href)) {
href = resolveUrl(base, href);
}
try {
href = encodeURI(href).replace(/%25/g, '%');
} catch (e) {
return null;
}
return href;
}
const baseUrls: Record<string, string> = {};
const justDomain = /^[^:]+:\/*[^/]*$/;
const protocol = /^([^:]+:)[\s\S]*$/;
const domain = /^([^:]+:\/*[^/]*)[\s\S]*$/;
export function resolveUrl(base: string, href: string) {
if (!baseUrls[' ' + base]) {
// we can ignore everything in base after the last slash of its path component,
// but we might need to add _that_
// https://tools.ietf.org/html/rfc3986#section-3
if (justDomain.test(base)) {
baseUrls[' ' + base] = base + '/';
} else {
baseUrls[' ' + base] = rtrim(base, '/', true);
}
}
base = baseUrls[' ' + base];
const relativeBase = base.indexOf(':') === -1;
if (href.substring(0, 2) === '//') {
if (relativeBase) {
return href;
}
return base.replace(protocol, '$1') + href;
} else if (href.charAt(0) === '/') {
if (relativeBase) {
return href;
}
return base.replace(domain, '$1') + href;
} else {
return base + href;
}
}
export const noopTest = { exec: () => null };
export function splitCells(tableRow: string, count?: number) {
// ensure that every cell-delimiting pipe has a space
// before it to distinguish it from an escaped pipe
const row = tableRow.replace(/\|/g, (_, offset, str) => {
let escaped = false;
let curr = offset;
while (--curr >= 0 && str[curr] === '\\') escaped = !escaped;
if (escaped) {
// odd number of slashes means | is escaped
// so we leave it alone
return '|';
} else {
// add space before unescaped |
return ' |';
}
}),
cells = row.split(/ \|/);
let i = 0;
// First/last cell in a row cannot be empty if it has no leading/trailing pipe
if (!cells[0].trim()) {
cells.shift();
}
if (cells.length > 0 && !cells[cells.length - 1].trim()) {
cells.pop();
}
if (count) {
if (cells.length > count) {
cells.splice(count);
} else {
while (cells.length < count) cells.push('');
}
}
for (; i < cells.length; i++) {
// leading or trailing whitespace is ignored per the gfm spec
cells[i] = cells[i].trim().replace(/\\\|/g, '|');
}
return cells;
}
/**
* Remove trailing 'c's. Equivalent to str.replace(/c*$/, '').
* /c*$/ is vulnerable to REDOS.
*
* @param str
* @param c
* @param invert Remove suffix of non-c chars instead. Default falsey.
*/
export function rtrim(str: string, c: string, invert?: boolean) {
const l = str.length;
if (l === 0) {
return '';
}
// Length of suffix matching the invert condition.
let suffLen = 0;
// Step left until we fail to match the invert condition.
while (suffLen < l) {
const currChar = str.charAt(l - suffLen - 1);
if (currChar === c && !invert) {
suffLen++;
} else if (currChar !== c && invert) {
suffLen++;
} else {
break;
}
}
return str.slice(0, l - suffLen);
}
export function findClosingBracket(str: string, b: string) {
if (str.indexOf(b[1]) === -1) {
return -1;
}
let level = 0;
for (let i = 0; i < str.length; i++) {
if (str[i] === '\\') {
i++;
} else if (str[i] === b[0]) {
level++;
} else if (str[i] === b[1]) {
level--;
if (level < 0) {
return i;
}
}
}
return -1;
}
export function checkDeprecations(opt: MarkedOptions, callback?: ResultCallback) {
if (!opt || opt.silent) {
return;
}
if (callback) {
console.warn('marked(): callback is deprecated since version 5.0.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/using_pro#async');
}
if (opt.sanitize || opt.sanitizer) {
console.warn('marked(): sanitize and sanitizer parameters are deprecated since version 0.7.0, should not be used and will be removed in the future. Read more here: https://marked.js.org/#/USING_ADVANCED.md#options');
}
if (opt.highlight || opt.langPrefix !== 'language-') {
console.warn('marked(): highlight and langPrefix parameters are deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-highlight.');
}
if (opt.mangle) {
console.warn('marked(): mangle parameter is enabled by default, but is deprecated since version 5.0.0, and will be removed in the future. To clear this warning, install https://www.npmjs.com/package/marked-mangle, or disable by setting `{mangle: false}`.');
}
if (opt.baseUrl) {
console.warn('marked(): baseUrl parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-base-url.');
}
if (opt.smartypants) {
console.warn('marked(): smartypants parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-smartypants.');
}
if (opt.xhtml) {
console.warn('marked(): xhtml parameter is deprecated since version 5.0.0, should not be used and will be removed in the future. Instead use https://www.npmjs.com/package/marked-xhtml.');
}
if (opt.headerIds || opt.headerPrefix) {
console.warn('marked(): headerIds and headerPrefix parameters enabled by default, but are deprecated since version 5.0.0, and will be removed in the future. To clear this warning, install https://www.npmjs.com/package/marked-gfm-heading-id, or disable by setting `{headerIds: false}`.');
}
}
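
A few worked calls for the less obvious helpers above (illustrative; the import path is assumed, and the expected results follow from the implementations shown here):

import { splitCells, rtrim, findClosingBracket } from './src/helpers';

// Unescaped pipes delimit cells; escaped ones (\|) stay inside a cell.
splitCells('a | b \\| c | d', 3);        // ['a', 'b | c', 'd']

// Strip a trailing run of one character without a /c*$/ regex.
rtrim('a-b---', '-');                    // 'a-b'

// Index of the ')' that closes a '(' opened before the string started,
// as the link tokenizer uses it to trim the href portion.
findClosingBracket('a (b) c) d', '()');  // 7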

@ -0,0 +1,143 @@
import { _Lexer } from './Lexer';
import { _Parser } from './Parser';
import { _Tokenizer } from './Tokenizer';
import { _Renderer } from './Renderer';
import { _TextRenderer } from './TextRenderer';
import { _Slugger } from './Slugger';
import { _Hooks } from './Hooks';
import { Marked } from './Instance';
import {
_getDefaults,
changeDefaults,
_defaults
} from './defaults';
import type { MarkedExtension, MarkedOptions } from './MarkedOptions';
import type { Token, TokensList } from './Tokens';
import type { ResultCallback } from './Instance';
const markedInstance = new Marked();
/**
* Compiles markdown to HTML asynchronously.
*
* @param src String of markdown source to be compiled
* @param options Hash of options, having async: true
* @return Promise of string of compiled HTML
*/
export function marked(src: string, options: MarkedOptions & { async: true }): Promise<string>;
/**
* Compiles markdown to HTML synchronously.
*
* @param src String of markdown source to be compiled
* @param options Optional hash of options
* @return String of compiled HTML
*/
export function marked(src: string, options?: MarkedOptions): string;
/**
* Compiles markdown to HTML asynchronously with a callback.
*
* @param src String of markdown source to be compiled
* @param callback Function called when the markdownString has been fully parsed when using async highlighting
*/
export function marked(src: string, callback: ResultCallback): void;
/**
* Compiles markdown to HTML asynchronously with a callback.
*
* @param src String of markdown source to be compiled
* @param options Hash of options
* @param callback Function called when the markdownString has been fully parsed when using async highlighting
*/
export function marked(
src: string,
options: MarkedOptions,
callback: ResultCallback,
): void;
export function marked(src: string, opt?: MarkedOptions | ResultCallback, callback?: ResultCallback): string | Promise<string | undefined> | undefined {
return markedInstance.parse(src, opt, callback);
}
/**
* Sets the default options.
*
* @param options Hash of options
*/
marked.options =
marked.setOptions = function(options: MarkedOptions) {
markedInstance.setOptions(options);
marked.defaults = markedInstance.defaults;
changeDefaults(marked.defaults);
return marked;
};
/**
* Gets the original marked default options.
*/
marked.getDefaults = _getDefaults;
marked.defaults = _defaults;
/**
* Use Extension
*/
marked.use = function(...args: MarkedExtension[]) {
markedInstance.use(...args);
marked.defaults = markedInstance.defaults;
changeDefaults(marked.defaults);
return marked;
};
/**
* Run callback for every token
*/
marked.walkTokens = function <T = void>(tokens: Token[] | TokensList, callback: (token: Token) => T | T[]) {
return markedInstance.walkTokens(tokens, callback);
};
/**
* Compiles markdown to HTML without enclosing `p` tag.
*
* @param src String of markdown source to be compiled
* @param options Hash of options
* @return String of compiled HTML
*/
marked.parseInline = markedInstance.parseInline;
/**
* Expose
*/
marked.Parser = _Parser;
marked.parser = _Parser.parse;
marked.Renderer = _Renderer;
marked.TextRenderer = _TextRenderer;
marked.Lexer = _Lexer;
marked.lexer = _Lexer.lex;
marked.Tokenizer = _Tokenizer;
marked.Slugger = _Slugger;
marked.Hooks = _Hooks;
marked.parse = marked;
export const options = marked.options;
export const setOptions = marked.setOptions;
export const use = marked.use;
export const walkTokens = marked.walkTokens;
export const parseInline = marked.parseInline;
export const parse = marked;
export const parser = _Parser.parse;
export const lexer = _Lexer.lex;
export { _defaults as defaults, _getDefaults as getDefaults } from './defaults';
export { _Lexer as Lexer } from './Lexer';
export { _Parser as Parser } from './Parser';
export { _Tokenizer as Tokenizer } from './Tokenizer';
export { _Renderer as Renderer } from './Renderer';
export { _TextRenderer as TextRenderer } from './TextRenderer';
export { _Slugger as Slugger } from './Slugger';
export { _Hooks as Hooks } from './Hooks';
export { Marked } from './Instance';
export * from './MarkedOptions';
export * from './rules';
export * from './Tokens';
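
Putting the entry point together, a minimal usage sketch of the vendored module (illustrative only; the import path assumes src/utils/marked/src/ and the output assumes the default renderer):

import { marked, lexer, parser } from './src/marked';

// One-shot, synchronous compile with the default options:
const html = marked('# Hello **world**');

// Or the two steps the instance wires together: lex to tokens, then parse.
const tokens = lexer('# Hello **world**');
const sameHtml = parser(tokens);
console.log(html === sameHtml); // true with identical (default) options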

@ -0,0 +1,384 @@
import {
noopTest,
edit
} from './helpers';
export type Rule = RegExp | string;
export interface Rules {
[ruleName: string]: Pick<RegExp, 'exec'> | Rule | Rules;
}
type BlockRuleNames =
| 'newline'
| 'code'
| 'fences'
| 'hr'
| 'heading'
| 'blockquote'
| 'list'
| 'html'
| 'def'
| 'lheading'
| '_paragraph'
| 'text'
| '_label'
| '_title'
| 'bullet'
| 'listItemStart'
| '_tag'
| '_comment'
| 'paragraph'
| 'table';
type BlockSubRuleNames = 'normal' | 'gfm' | 'pedantic';
type InlineRuleNames =
| 'escape'
| 'autolink'
| 'tag'
| 'link'
| 'reflink'
| 'nolink'
| 'reflinkSearch'
| 'code'
| 'br'
| 'text'
| '_punctuation'
| 'punctuation'
| 'blockSkip'
| 'escapedEmSt'
| '_comment'
| '_escapes'
| '_scheme'
| '_email'
| '_attribute'
| '_label'
| '_href'
| '_title'
| 'strong'
| '_extended_email'
| '_backpedal';
type InlineSubRuleNames = 'gfm' | 'emStrong' | 'normal' | 'pedantic'| 'breaks';
/**
* Block-Level Grammar
*/
// Not all rules are defined in the object literal
// @ts-expect-error
export const block: Record<BlockRuleNames, Rule> & Record<BlockSubRuleNames, Rules> & Rules = {
newline: /^(?: *(?:\n|$))+/,
code: /^( {4}[^\n]+(?:\n(?: *(?:\n|$))*)?)+/,
fences: /^ {0,3}(`{3,}(?=[^`\n]*(?:\n|$))|~{3,})([^\n]*)(?:\n|$)(?:|([\s\S]*?)(?:\n|$))(?: {0,3}\1[~`]* *(?=\n|$)|$)/,
hr: /^ {0,3}((?:-[\t ]*){3,}|(?:_[ \t]*){3,}|(?:\*[ \t]*){3,})(?:\n+|$)/,
heading: /^ {0,3}(#{1,6})(?=\s|$)(.*)(?:\n+|$)/,
blockquote: /^( {0,3}> ?(paragraph|[^\n]*)(?:\n|$))+/,
list: /^( {0,3}bull)([ \t][^\n]+?)?(?:\n|$)/,
html: '^ {0,3}(?:' // optional indentation
+ '<(script|pre|style|textarea)[\\s>][\\s\\S]*?(?:</\\1>[^\\n]*\\n+|$)' // (1)
+ '|comment[^\\n]*(\\n+|$)' // (2)
+ '|<\\?[\\s\\S]*?(?:\\?>\\n*|$)' // (3)
+ '|<![A-Z][\\s\\S]*?(?:>\\n*|$)' // (4)
+ '|<!\\[CDATA\\[[\\s\\S]*?(?:\\]\\]>\\n*|$)' // (5)
+ '|</?(tag)(?: +|\\n|/?>)[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (6)
+ '|<(?!script|pre|style|textarea)([a-z][\\w-]*)(?:attribute)*? */?>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) open tag
+ '|</(?!script|pre|style|textarea)[a-z][\\w-]*\\s*>(?=[ \\t]*(?:\\n|$))[\\s\\S]*?(?:(?:\\n *)+\\n|$)' // (7) closing tag
+ ')',
def: /^ {0,3}\[(label)\]: *(?:\n *)?([^<\s][^\s]*|<.*?>)(?:(?: +(?:\n *)?| *\n *)(title))? *(?:\n+|$)/,
table: noopTest,
lheading: /^((?:(?!^bull ).|\n(?!\n|bull ))+?)\n {0,3}(=+|-+) *(?:\n+|$)/,
// regex template, placeholders will be replaced according to different paragraph
// interruption rules of commonmark and the original markdown spec:
_paragraph: /^([^\n]+(?:\n(?!hr|heading|lheading|blockquote|fences|list|html|table| +\n)[^\n]+)*)/,
text: /^[^\n]+/
};
block._label = /(?!\s*\])(?:\\.|[^\[\]\\])+/;
block._title = /(?:"(?:\\"?|[^"\\])*"|'[^'\n]*(?:\n[^'\n]+)*\n?'|\([^()]*\))/;
block.def = edit(block.def)
.replace('label', block._label)
.replace('title', block._title)
.getRegex();
block.bullet = /(?:[*+-]|\d{1,9}[.)])/;
block.listItemStart = edit(/^( *)(bull) */)
.replace('bull', block.bullet)
.getRegex();
block.list = edit(block.list)
.replace(/bull/g, block.bullet)
.replace('hr', '\\n+(?=\\1?(?:(?:- *){3,}|(?:_ *){3,}|(?:\\* *){3,})(?:\\n+|$))')
.replace('def', '\\n+(?=' + block.def.source + ')')
.getRegex();
block._tag = 'address|article|aside|base|basefont|blockquote|body|caption'
+ '|center|col|colgroup|dd|details|dialog|dir|div|dl|dt|fieldset|figcaption'
+ '|figure|footer|form|frame|frameset|h[1-6]|head|header|hr|html|iframe'
+ '|legend|li|link|main|menu|menuitem|meta|nav|noframes|ol|optgroup|option'
+ '|p|param|section|source|summary|table|tbody|td|tfoot|th|thead|title|tr'
+ '|track|ul';
block._comment = /<!--(?!-?>)[\s\S]*?(?:-->|$)/;
block.html = edit(block.html, 'i')
.replace('comment', block._comment)
.replace('tag', block._tag)
.replace('attribute', / +[a-zA-Z:_][\w.:-]*(?: *= *"[^"\n]*"| *= *'[^'\n]*'| *= *[^\s"'=<>`]+)?/)
.getRegex();
block.lheading = edit(block.lheading)
.replace(/bull/g, block.bullet) // lists can interrupt
.getRegex();
block.paragraph = edit(block._paragraph)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
.replace('|table', '')
.replace('blockquote', ' {0,3}>')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
block.blockquote = edit(block.blockquote)
.replace('paragraph', block.paragraph)
.getRegex();
/**
* Normal Block Grammar
*/
block.normal = { ...block };
/**
* GFM Block Grammar
*/
block.gfm = {
...block.normal,
table: '^ *([^\\n ].*\\|.*)\\n' // Header
+ ' {0,3}(?:\\| *)?(:?-+:? *(?:\\| *:?-+:? *)*)(?:\\| *)?' // Align
+ '(?:\\n((?:(?! *\\n|hr|heading|blockquote|code|fences|list|html).*(?:\\n|$))*)\\n*|$)' // Cells
};
block.gfm.table = edit(block.gfm.table as Rule)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('blockquote', ' {0,3}>')
.replace('code', ' {4}[^\\n]')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // tables can be interrupted by type (6) html blocks
.getRegex();
block.gfm.paragraph = edit(block._paragraph)
.replace('hr', block.hr)
.replace('heading', ' {0,3}#{1,6} ')
.replace('|lheading', '') // setext headings don't interrupt commonmark paragraphs
.replace('table', block.gfm.table as RegExp) // interrupt paragraphs with table
.replace('blockquote', ' {0,3}>')
.replace('fences', ' {0,3}(?:`{3,}(?=[^`\\n]*\\n)|~{3,})[^\\n]*\\n')
.replace('list', ' {0,3}(?:[*+-]|1[.)]) ') // only lists starting from 1 can interrupt
.replace('html', '</?(?:tag)(?: +|\\n|/?>)|<(?:script|pre|style|textarea|!--)')
.replace('tag', block._tag) // pars can be interrupted by type (6) html blocks
.getRegex();
/**
* Pedantic grammar (original John Gruber's loose markdown specification)
*/
block.pedantic = {
...block.normal,
html: edit(
'^ *(?:comment *(?:\\n|\\s*$)'
+ '|<(tag)[\\s\\S]+?</\\1> *(?:\\n{2,}|\\s*$)' // closed tag
+ '|<tag(?:"[^"]*"|\'[^\']*\'|\\s[^\'"/>\\s]*)*?/?> *(?:\\n{2,}|\\s*$))')
.replace('comment', block._comment)
.replace(/tag/g, '(?!(?:'
+ 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code|var|samp|kbd|sub'
+ '|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo|span|br|wbr|ins|del|img)'
+ '\\b)\\w+(?!:|[^\\w\\s@]*@)\\b')
.getRegex(),
def: /^ *\[([^\]]+)\]: *<?([^\s>]+)>?(?: +(["(][^\n]+[")]))? *(?:\n+|$)/,
heading: /^(#{1,6})(.*)(?:\n+|$)/,
fences: noopTest, // fences not supported
lheading: /^(.+?)\n {0,3}(=+|-+) *(?:\n+|$)/,
paragraph: edit(block.normal._paragraph as Rule)
.replace('hr', block.hr)
.replace('heading', ' *#{1,6} *[^\n]')
.replace('lheading', block.lheading)
.replace('blockquote', ' {0,3}>')
.replace('|fences', '')
.replace('|list', '')
.replace('|html', '')
.getRegex()
};
/**
* Inline-Level Grammar
*/
// Not all rules are defined in the object literal
// @ts-expect-error
export const inline: Record<InlineRuleNames, Rule> & Record<InlineSubRuleNames, Rules> & Rules = {
escape: /^\\([!"#$%&'()*+,\-./:;<=>?@\[\]\\^_`{|}~])/,
autolink: /^<(scheme:[^\s\x00-\x1f<>]*|email)>/,
url: noopTest,
tag: '^comment'
+ '|^</[a-zA-Z][\\w:-]*\\s*>' // closing tag
+ '|^<[a-zA-Z][\\w-]*(?:attribute)*?\\s*/?>' // open tag
+ '|^<\\?[\\s\\S]*?\\?>' // processing instruction, e.g. <?php ?>
+ '|^<![a-zA-Z]+\\s[\\s\\S]*?>' // declaration, e.g. <!DOCTYPE html>
+ '|^<!\\[CDATA\\[[\\s\\S]*?\\]\\]>', // CDATA section
link: /^!?\[(label)\]\(\s*(href)(?:\s+(title))?\s*\)/,
reflink: /^!?\[(label)\]\[(ref)\]/,
nolink: /^!?\[(ref)\](?:\[\])?/,
reflinkSearch: 'reflink|nolink(?!\\()',
emStrong: {
lDelim: /^(?:\*+(?:((?!\*)[punct])|[^\s*]))|^_+(?:((?!_)[punct])|([^\s_]))/,
// (1) and (2) can only be a Right Delimiter. (3) and (4) can only be Left. (5) and (6) can be either Left or Right.
// | Skip orphan inside strong | Consume to delim | (1) #*** | (2) a***#, a*** | (3) #***a, ***a | (4) ***# | (5) #***# | (6) a***a
rDelimAst: /^[^_*]*?__[^_*]*?\*[^_*]*?(?=__)|[^*]+(?=[^*])|(?!\*)[punct](\*+)(?=[\s]|$)|[^punct\s](\*+)(?!\*)(?=[punct\s]|$)|(?!\*)[punct\s](\*+)(?=[^punct\s])|[\s](\*+)(?!\*)(?=[punct])|(?!\*)[punct](\*+)(?!\*)(?=[punct])|[^punct\s](\*+)(?=[^punct\s])/,
rDelimUnd: /^[^_*]*?\*\*[^_*]*?_[^_*]*?(?=\*\*)|[^_]+(?=[^_])|(?!_)[punct](_+)(?=[\s]|$)|[^punct\s](_+)(?!_)(?=[punct\s]|$)|(?!_)[punct\s](_+)(?=[^punct\s])|[\s](_+)(?!_)(?=[punct])|(?!_)[punct](_+)(?!_)(?=[punct])/ // ^- Not allowed for _
},
code: /^(`+)([^`]|[^`][\s\S]*?[^`])\1(?!`)/,
br: /^( {2,}|\\)\n(?!\s*$)/,
del: noopTest,
text: /^(`+|[^`])(?:(?= {2,}\n)|[\s\S]*?(?:(?=[\\<!\[`*_]|\b_|$)|[^ ](?= {2,}\n)))/,
punctuation: /^((?![*_])[\spunctuation])/
};
// // list of unicode punctuation marks, plus any missing characters from CommonMark spec
// inline._punctuation = '\\p{P}$+<=>`^|~';
// list of punctuation marks from CommonMark spec
// without * and _ to handle the different emphasis markers * and _
inline._uc_punctuation = '\\u00A1\\u00A7\\u00AB\\u00B6\\u00B7\\u00BB\\u00BF\\u037E\\u0387\\u055A-\\u055F\\u0589\\u058A\\u05BE\\u05C0\\u05C3\\u05C6\\u05F3\\u05F4\\u0609\\u060A\\u060C\\u060D\\u061B\\u061E\\u061F\\u066A-\\u066D\\u06D4\\u0700-\\u070D\\u07F7-\\u07F9\\u0830-\\u083E\\u085E\\u0964\\u0965\\u0970\\u0AF0\\u0DF4\\u0E4F\\u0E5A\\u0E5B\\u0F04-\\u0F12\\u0F14\\u0F3A-\\u0F3D\\u0F85\\u0FD0-\\u0FD4\\u0FD9\\u0FDA\\u104A-\\u104F\\u10FB\\u1360-\\u1368\\u1400\\u166D\\u166E\\u169B\\u169C\\u16EB-\\u16ED\\u1735\\u1736\\u17D4-\\u17D6\\u17D8-\\u17DA\\u1800-\\u180A\\u1944\\u1945\\u1A1E\\u1A1F\\u1AA0-\\u1AA6\\u1AA8-\\u1AAD\\u1B5A-\\u1B60\\u1BFC-\\u1BFF\\u1C3B-\\u1C3F\\u1C7E\\u1C7F\\u1CC0-\\u1CC7\\u1CD3\\u2010-\\u2027\\u2030-\\u2043\\u2045-\\u2051\\u2053-\\u205E\\u207D\\u207E\\u208D\\u208E\\u2308-\\u230B\\u2329\\u232A\\u2768-\\u2775\\u27C5\\u27C6\\u27E6-\\u27EF\\u2983-\\u2998\\u29D8-\\u29DB\\u29FC\\u29FD\\u2CF9-\\u2CFC\\u2CFE\\u2CFF\\u2D70\\u2E00-\\u2E2E\\u2E30-\\u2E42\\u3001-\\u3003\\u3008-\\u3011\\u3014-\\u301F\\u3030\\u303D\\u30A0\\u30FB\\uA4FE\\uA4FF\\uA60D-\\uA60F\\uA673\\uA67E\\uA6F2-\\uA6F7\\uA874-\\uA877\\uA8CE\\uA8CF\\uA8F8-\\uA8FA\\uA8FC\\uA92E\\uA92F\\uA95F\\uA9C1-\\uA9CD\\uA9DE\\uA9DF\\uAA5C-\\uAA5F\\uAADE\\uAADF\\uAAF0\\uAAF1\\uABEB\\uFD3E\\uFD3F\\uFE10-\\uFE19\\uFE30-\\uFE52\\uFE54-\\uFE61\\uFE63\\uFE68\\uFE6A\\uFE6B\\uFF01-\\uFF03\\uFF05-\\uFF0A\\uFF0C-\\uFF0F\\uFF1A\\uFF1B\\uFF1F\\uFF20\\uFF3B-\\uFF3D\\uFF3F\\uFF5B\\uFF5D\\uFF5F-\\uFF65';
inline._punctuation = '!"#$%&\'()+\\-.,/:;<=>?@\\[\\]`^{|}~\\\\' + inline._uc_punctuation;
inline.punctuation = edit(inline.punctuation, 'u').replace(/punctuation/g, inline._punctuation).getRegex();
// sequences em should skip over [title](link), `code`, <html>
inline.blockSkip = /\[[^[\]]*?\]\([^\(\)]*?\)|`[^`]*?`|<[^<>]*?>/g;
inline.anyPunctuation = /\\[punct]/g;
inline._escapes = /\\([punct])/g;
inline._comment = edit(block._comment).replace('(?:-->|$)', '-->').getRegex();
inline.emStrong.lDelim = edit(inline.emStrong.lDelim as Rule, 'u')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline.emStrong.rDelimAst = edit(inline.emStrong.rDelimAst as Rule, 'gu')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline.emStrong.rDelimUnd = edit(inline.emStrong.rDelimUnd as Rule, 'gu')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline.anyPunctuation = edit(inline.anyPunctuation as Rule, 'gu')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline._escapes = edit(inline._escapes, 'gu')
.replace(/punct/g, inline._punctuation)
.getRegex();
inline._scheme = /[a-zA-Z][a-zA-Z0-9+.-]{1,31}/;
inline._email = /[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+(@)[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?![-_])/;
inline.autolink = edit(inline.autolink)
.replace('scheme', inline._scheme)
.replace('email', inline._email)
.getRegex();
inline._attribute = /\s+[a-zA-Z:_][\w.:-]*(?:\s*=\s*"[^"]*"|\s*=\s*'[^']*'|\s*=\s*[^\s"'=<>`]+)?/;
inline.tag = edit(inline.tag)
.replace('comment', inline._comment)
.replace('attribute', inline._attribute)
.getRegex();
inline._label = /(?:\[(?:\\.|[^\[\]\\])*\]|\\.|`[^`]*`|[^\[\]\\`])*?/;
inline._href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;
inline._title = /"(?:\\"?|[^"\\])*"|'(?:\\'?|[^'\\])*'|\((?:\\\)?|[^)\\])*\)/;
inline.link = edit(inline.link)
.replace('label', inline._label)
.replace('href', inline._href)
.replace('title', inline._title)
.getRegex();
inline.reflink = edit(inline.reflink)
.replace('label', inline._label)
.replace('ref', block._label)
.getRegex();
inline.nolink = edit(inline.nolink)
.replace('ref', block._label)
.getRegex();
inline.reflinkSearch = edit(inline.reflinkSearch, 'g')
.replace('reflink', inline.reflink)
.replace('nolink', inline.nolink)
.getRegex();
/**
* Normal Inline Grammar
*/
inline.normal = { ...inline };
/**
* Pedantic Inline Grammar
*/
inline.pedantic = {
...inline.normal,
strong: {
start: /^__|\*\*/,
middle: /^__(?=\S)([\s\S]*?\S)__(?!_)|^\*\*(?=\S)([\s\S]*?\S)\*\*(?!\*)/,
endAst: /\*\*(?!\*)/g,
endUnd: /__(?!_)/g
},
em: {
start: /^_|\*/,
middle: /^()\*(?=\S)([\s\S]*?\S)\*(?!\*)|^_(?=\S)([\s\S]*?\S)_(?!_)/,
endAst: /\*(?!\*)/g,
endUnd: /_(?!_)/g
},
link: edit(/^!?\[(label)\]\((.*?)\)/)
.replace('label', inline._label)
.getRegex(),
reflink: edit(/^!?\[(label)\]\s*\[([^\]]*)\]/)
.replace('label', inline._label)
.getRegex()
};
/**
* GFM Inline Grammar
*/
inline.gfm = {
...inline.normal,
escape: edit(inline.escape).replace('])', '~|])').getRegex(),
_extended_email: /[A-Za-z0-9._+-]+(@)[a-zA-Z0-9-_]+(?:\.[a-zA-Z0-9-_]*[a-zA-Z0-9])+(?![-_])/,
url: /^((?:ftp|https?):\/\/|www\.)(?:[a-zA-Z0-9\-]+\.?)+[^\s<]*|^email/,
_backpedal: /(?:[^?!.,:;*_'"~()&]+|\([^)]*\)|&(?![a-zA-Z0-9]+;$)|[?!.,:;*_'"~)]+(?!$))+/,
del: /^(~~?)(?=[^\s~])([\s\S]*?[^\s~])\1(?=[^~]|$)/,
text: /^([`~]+|[^`~])(?:(?= {2,}\n)|(?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)|[\s\S]*?(?:(?=[\\<!\[`*~_]|\b_|https?:\/\/|ftp:\/\/|www\.|$)|[^ ](?= {2,}\n)|[^a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-](?=[a-zA-Z0-9.!#$%&'*+\/=?_`{\|}~-]+@)))/
};
inline.gfm.url = edit(inline.gfm.url as Rule, 'i')
.replace('email', inline.gfm._extended_email as RegExp)
.getRegex();
/**
* GFM + Line Breaks Inline Grammar
*/
inline.breaks = {
...inline.gfm,
br: edit(inline.br).replace('{2,}', '*').getRegex(),
text: edit(inline.gfm.text as Rule)
.replace('\\b_', '\\b_| {2,}\\n')
.replace(/\{2,\}/g, '*')
.getRegex()
};
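
The pattern this file repeats, shown standalone: edit() takes a template whose placeholders are plain words, substitutes sub-patterns into them, and compiles the result once. A reduced sketch with simplified label/href patterns (illustrative only; the import path is assumed):

import { edit } from './src/helpers';

const template = /^!?\[(label)\]\((href)\)/;
const label = /(?:\\.|[^\[\]\\])*?/;
const href = /<(?:\\.|[^\n<>\\])+>|[^\s\x00-\x1f]*/;

const linkRule = edit(template)
  .replace('label', label)
  .replace('href', href)
  .getRegex();

console.log(linkRule.test('[docs](https://example.com)')); // true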

@ -1,5 +1,5 @@
import {Token, Tokens, TokensList} from "@/utils/marked/marked"
import {Attributes, ComponentType, createElement, ReactElement} from "react";
import { Token, Tokens, TokensList } from "./src/Tokens";
const unescapeTest = /&(#(?:\d+)|(?:#x[0-9A-Fa-f]+)|(?:\w+));?/ig
