// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
14 "golang.org/x/net/html/atom"
// A TokenType is the type of a Token.
type TokenType uint32

const (
	// ErrorToken means that an error occurred during tokenization.
	ErrorToken TokenType = iota
	// TextToken means a text node.
	TextToken
	// A StartTagToken looks like <a>.
	StartTagToken
	// An EndTagToken looks like </a>.
	EndTagToken
	// A SelfClosingTagToken looks like <br/>.
	SelfClosingTagToken
	// A CommentToken looks like <!--x-->.
	CommentToken
	// A DoctypeToken looks like <!DOCTYPE x>.
	DoctypeToken
)
// ErrBufferExceeded means that the buffering limit was exceeded.
var ErrBufferExceeded = errors.New("max buffer exceeded")
// String returns a string representation of the TokenType.
func (t TokenType) String() string {
	switch t {
	case SelfClosingTagToken:
		return "SelfClosingTag"
	}
	return "Invalid(" + strconv.Itoa(int(t)) + ")"
}
// An Attribute is an attribute namespace-key-value triple. Namespace is
// non-empty for foreign attributes like xlink, Key is alphabetic (and hence
// does not contain escapable characters like '&', '<' or '>'), and Val is
// unescaped (it looks like "a<b" rather than "a&lt;b").
//
// Namespace is only used by the parser, not the tokenizer.
type Attribute struct {
	Namespace, Key, Val string
}
// A Token consists of a TokenType and some Data (tag name for start and end
// tags, content for text, comments and doctypes). A tag Token may also contain
// a slice of Attributes. Data is unescaped for all Tokens (it looks like "a<b"
// rather than "a&lt;b"). For tag Tokens, DataAtom is the atom for Data, or
// zero if Data is not a known tag name.
type Token struct {
	Type     TokenType
	DataAtom atom.Atom
	Data     string
	Attr     []Attribute
}
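// For illustration, a sketch of the Token that the start tag <a href="foo">
// might produce (field values here are illustrative, not from the original
// source):
//
//	Token{
//		Type:     StartTagToken,
//		DataAtom: atom.A,
//		Data:     "a",
//		Attr:     []Attribute{{Key: "href", Val: "foo"}},
//	}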
// tagString returns a string representation of a tag Token's Data and Attr.
func (t Token) tagString() string {
	buf := bytes.NewBufferString(t.Data)
	for _, a := range t.Attr {
		buf.WriteString(a.Key)
// String returns a string representation of the Token.
func (t Token) String() string {
	switch t.Type {
	case ErrorToken:
		return ""
	case TextToken:
		return EscapeString(t.Data)
	case StartTagToken:
		return "<" + t.tagString() + ">"
	case EndTagToken:
		return "</" + t.tagString() + ">"
	case SelfClosingTagToken:
		return "<" + t.tagString() + "/>"
	case CommentToken:
		return "<!--" + t.Data + "-->"
	case DoctypeToken:
		return "<!DOCTYPE " + t.Data + ">"
	}
	return "Invalid(" + strconv.Itoa(int(t.Type)) + ")"
}
// span is a range of bytes in a Tokenizer's buffer. The start is inclusive,
// the end is exclusive.
type span struct {
	start, end int
}
// A Tokenizer returns a stream of HTML Tokens.
type Tokenizer struct {
	// r is the source of the HTML text.
	r io.Reader
	// tt is the TokenType of the current token.
	tt TokenType
	// err is the first error encountered during tokenization. It is possible
	// for tt != Error && err != nil to hold: this means that Next returned a
	// valid token but the subsequent Next call will return an error token.
	// For example, if the HTML text input was just "plain", then the first
	// Next call would set z.err to io.EOF but return a TextToken, and all
	// subsequent Next calls would return an ErrorToken.
	// err is never reset. Once it becomes non-nil, it stays non-nil.
	err error
	// readErr is the error returned by the io.Reader r. It is separate from
	// err because it is valid for an io.Reader to return (n int, err1 error)
	// such that n > 0 && err1 != nil, and callers should always process the
	// n > 0 bytes before considering the error err1.
	readErr error
	// buf[raw.start:raw.end] holds the raw bytes of the current token.
	// buf[raw.end:] is buffered input that will yield future tokens.
	raw span
	buf []byte
	// maxBuf limits the data buffered in buf. A value of 0 means unlimited.
	maxBuf int
	// buf[data.start:data.end] holds the raw bytes of the current token's data:
	// a text token's text, a tag token's tag name, etc.
	data span
	// pendingAttr is the attribute key and value currently being tokenized.
	// When complete, pendingAttr is pushed onto attr. nAttrReturned is
	// incremented on each call to TagAttr.
	pendingAttr   [2]span
	attr          [][2]span
	nAttrReturned int
	// rawTag is the "script" in "</script>" that closes the next token. If
	// non-empty, the subsequent call to Next will return a raw or RCDATA text
	// token: one that treats "<p>" as text instead of an element.
	// rawTag's contents are lower-cased.
	rawTag string
	// textIsRaw is whether the current text token's data is not escaped.
	textIsRaw bool
	// convertNUL is whether NUL bytes in the current token's data should
	// be converted into \ufffd replacement characters.
	convertNUL bool
	// allowCDATA is whether CDATA sections are allowed in the current context.
	allowCDATA bool
}
// AllowCDATA sets whether or not the tokenizer recognizes <![CDATA[foo]]> as
// the text "foo". The default value is false, which means to recognize it as
// a bogus comment "<!-- [CDATA[foo]] -->" instead.
//
// Strictly speaking, an HTML5 compliant tokenizer should allow CDATA if and
// only if tokenizing foreign content, such as MathML and SVG. However,
// tracking foreign-contentness is difficult to do purely in the tokenizer,
// as opposed to the parser, due to HTML integration points: an <svg> element
// can contain a <foreignObject> that is foreign-to-SVG but not foreign-to-
// HTML. For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call AllowCDATA as appropriate.
// In practice, if using the tokenizer without caring whether MathML or SVG
// CDATA is text or comments, such as tokenizing HTML to find all the anchor
// text, it is acceptable to ignore this responsibility.
func (z *Tokenizer) AllowCDATA(allowCDATA bool) {
	z.allowCDATA = allowCDATA
}
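// For example, a minimal sketch (hypothetical input, not from the original
// source) of opting in to CDATA when tokenizing SVG content:
//
//	z := NewTokenizer(strings.NewReader("<svg><![CDATA[a<b]]></svg>"))
//	z.AllowCDATA(true)
//	// "<![CDATA[a<b]]>" now tokenizes as the text "a<b" rather than as a
//	// bogus comment.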
// NextIsNotRawText instructs the tokenizer that the next token should not be
// considered as 'raw text'. Some elements, such as script and title elements,
// normally require the next token after the opening tag to be 'raw text' that
// has no child elements. For example, tokenizing "<title>a<b>c</b>d</title>"
// yields a start tag token for "<title>", a text token for "a<b>c</b>d", and
// an end tag token for "</title>". There are no distinct start tag or end tag
// tokens for the "<b>" and "</b>".
//
// This tokenizer implementation will generally look for raw text at the right
// times. Strictly speaking, an HTML5 compliant tokenizer should not look for
// raw text if in foreign content: <title> generally needs raw text, but a
// <title> inside an <svg> does not. Another example is that a <textarea>
// generally needs raw text, but a <textarea> is not allowed as an immediate
// child of a <select>; in normal parsing, a <textarea> implies </select>, but
// one cannot close the implicit element when parsing a <select>'s InnerHTML.
// Similarly to AllowCDATA, tracking the correct moment to override raw-text-
// ness is difficult to do purely in the tokenizer, as opposed to the parser.
// For strict compliance with the HTML5 tokenization algorithm, it is the
// responsibility of the user of a tokenizer to call NextIsNotRawText as
// appropriate. In practice, like AllowCDATA, it is acceptable to ignore this
// responsibility for basic usage.
//
// Note that this 'raw text' concept is different from the one offered by the
// Tokenizer.Raw method.
func (z *Tokenizer) NextIsNotRawText() {
	z.rawTag = ""
}
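// For example, a sketch of overriding raw-text-ness when the caller tracks
// foreign content itself (inSVG is hypothetical caller state, not part of
// this package):
//
//	if tt := z.Next(); tt == StartTagToken {
//		if name, _ := z.TagName(); inSVG && string(name) == "title" {
//			// Do not treat the contents of this <title> as raw text.
//			z.NextIsNotRawText()
//		}
//	}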
// Err returns the error associated with the most recent ErrorToken token.
// This is typically io.EOF, meaning the end of tokenization.
func (z *Tokenizer) Err() error {
	if z.tt != ErrorToken {
		return nil
	}
	return z.err
}
// readByte returns the next byte from the input stream, doing a buffered read
// from z.r into z.buf if necessary. z.buf[z.raw.start:z.raw.end] remains a
// contiguous byte slice that holds all the bytes read so far for the current
// token. It sets z.err if the underlying reader returns an error.
// Pre-condition: z.err == nil.
func (z *Tokenizer) readByte() byte {
	if z.raw.end >= len(z.buf) {
		// Our buffer is exhausted and we have to read from z.r. Check if the
		// previous read resulted in an error.
		if z.readErr != nil {
			z.err = z.readErr
			return 0
		}
		// We copy z.buf[z.raw.start:z.raw.end] to the beginning of z.buf. If the length
		// z.raw.end - z.raw.start is more than half the capacity of z.buf, then we
		// allocate a new buffer before the copy.
		c := cap(z.buf)
		d := z.raw.end - z.raw.start
		var buf1 []byte
		if 2*d > c {
			buf1 = make([]byte, d, 2*c)
		} else {
			buf1 = z.buf[:d]
		}
		copy(buf1, z.buf[z.raw.start:z.raw.end])
		if x := z.raw.start; x != 0 {
			// Adjust the data/attr spans to refer to the same contents after the copy.
			z.data.start -= x
			z.data.end -= x
			z.pendingAttr[0].start -= x
			z.pendingAttr[0].end -= x
			z.pendingAttr[1].start -= x
			z.pendingAttr[1].end -= x
			for i := range z.attr {
				z.attr[i][0].start -= x
				z.attr[i][0].end -= x
				z.attr[i][1].start -= x
				z.attr[i][1].end -= x
			}
		}
		z.raw.start, z.raw.end, z.buf = 0, d, buf1[:d]
		// Now that we have copied the live bytes to the start of the buffer,
		// we read from z.r into the remainder.
		var n int
		n, z.readErr = readAtLeastOneByte(z.r, buf1[d:cap(buf1)])
		if n == 0 {
			z.err = z.readErr
			return 0
		}
		z.buf = buf1[:d+n]
	}
	x := z.buf[z.raw.end]
	z.raw.end++
	if z.maxBuf > 0 && z.raw.end-z.raw.start >= z.maxBuf {
		z.err = ErrBufferExceeded
		return 0
	}
	return x
}
// Buffered returns a slice containing data buffered but not yet tokenized.
func (z *Tokenizer) Buffered() []byte {
	return z.buf[z.raw.end:]
}
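// For example, a minimal sketch (input chosen for illustration; exactly how
// much is buffered depends on how much has been read from r):
//
//	z := NewTokenizer(strings.NewReader("<p>hi</p>"))
//	z.Next()             // returns StartTagToken for "<p>"
//	raw := z.Raw()       // the bytes "<p>"
//	rest := z.Buffered() // bytes after "<p>" already read from r but not yet tokenized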
// readAtLeastOneByte wraps an io.Reader so that reading cannot return (0, nil).
// It returns io.ErrNoProgress if the underlying r.Read method returns (0, nil)
// too many times in succession.
func readAtLeastOneByte(r io.Reader, b []byte) (int, error) {
	for i := 0; i < 100; i++ {
		n, err := r.Read(b)
		if n != 0 || err != nil {
			return n, err
		}
	}
	return 0, io.ErrNoProgress
}
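// For illustration only (a hypothetical reader, not part of the package):
// without this wrapper, a Reader that keeps returning (0, nil) would make
// the tokenizer spin forever.
//
//	type sluggishReader struct{}
//
//	func (sluggishReader) Read(p []byte) (int, error) { return 0, nil }
//
//	// readAtLeastOneByte(sluggishReader{}, make([]byte, 16)) eventually
//	// returns (0, io.ErrNoProgress).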
// skipWhiteSpace skips past any white space.
func (z *Tokenizer) skipWhiteSpace() {
		case ' ', '\n', '\r', '\t', '\f':
// readRawOrRCDATA reads until the next "</foo>", where "foo" is z.rawTag and
// is typically something like "script" or "textarea".
func (z *Tokenizer) readRawOrRCDATA() {
	if z.rawTag == "script" {
		if z.readRawEndTag() || z.err != nil {
	z.data.end = z.raw.end
	// A textarea's or title's RCDATA can contain escaped entities.
	z.textIsRaw = z.rawTag != "textarea" && z.rawTag != "title"
// readRawEndTag attempts to read a tag like "</foo>", where "foo" is z.rawTag.
// If it succeeds, it backs up the input position to reconsume the tag and
// returns true. Otherwise it returns false. The opening "</" has already been
// consumed.
func (z *Tokenizer) readRawEndTag() bool {
	for i := 0; i < len(z.rawTag); i++ {
		if c != z.rawTag[i] && c != z.rawTag[i]-('a'-'A') {
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		// The 3 is 2 for the leading "</" plus 1 for the trailing character c.
		z.raw.end -= 3 + len(z.rawTag)
// readScript reads until the next </script> tag, following the byzantine
// rules for escaping/hiding the closing tag.
func (z *Tokenizer) readScript() {
		z.data.end = z.raw.end
		goto scriptDataLessThanSign

scriptDataLessThanSign:
		goto scriptDataEndTagOpen
		goto scriptDataEscapeStart

scriptDataEndTagOpen:
	if z.readRawEndTag() || z.err != nil {

scriptDataEscapeStart:
		goto scriptDataEscapeStartDash

scriptDataEscapeStartDash:
		goto scriptDataEscapedDashDash

		goto scriptDataEscapedDash
		goto scriptDataEscapedLessThanSign
	goto scriptDataEscaped

scriptDataEscapedDash:
		goto scriptDataEscapedDashDash
		goto scriptDataEscapedLessThanSign
	goto scriptDataEscaped

scriptDataEscapedDashDash:
		goto scriptDataEscapedDashDash
		goto scriptDataEscapedLessThanSign
	goto scriptDataEscaped

scriptDataEscapedLessThanSign:
		goto scriptDataEscapedEndTagOpen
	if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
		goto scriptDataDoubleEscapeStart

scriptDataEscapedEndTagOpen:
	if z.readRawEndTag() || z.err != nil {
	goto scriptDataEscaped

scriptDataDoubleEscapeStart:
	for i := 0; i < len("script"); i++ {
		if c != "script"[i] && c != "SCRIPT"[i] {
			goto scriptDataEscaped
	case ' ', '\n', '\r', '\t', '\f', '/', '>':
		goto scriptDataDoubleEscaped
	goto scriptDataEscaped

scriptDataDoubleEscaped:
		goto scriptDataDoubleEscapedDash
		goto scriptDataDoubleEscapedLessThanSign
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDash:
		goto scriptDataDoubleEscapedDashDash
		goto scriptDataDoubleEscapedLessThanSign
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedDashDash:
		goto scriptDataDoubleEscapedDashDash
		goto scriptDataDoubleEscapedLessThanSign
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapedLessThanSign:
		goto scriptDataDoubleEscapeEnd
	goto scriptDataDoubleEscaped

scriptDataDoubleEscapeEnd:
	if z.readRawEndTag() {
		z.raw.end += len("</script>")
		goto scriptDataEscaped
	goto scriptDataDoubleEscaped
// readComment reads the next comment token starting with "<!--". The opening
// "<!--" has already been consumed.
func (z *Tokenizer) readComment() {
	z.data.start = z.raw.end
		if z.data.end < z.data.start {
			// It's a comment with no data, like <!-->.
			z.data.end = z.data.start
	for dashCount := 2; ; {
			// Ignore up to two dashes at EOF.
			z.data.end = z.raw.end - dashCount
			z.data.end = z.raw.end - len("-->")
			z.data.end = z.raw.end
			z.data.end = z.raw.end - len("--!>")
// readUntilCloseAngle reads until the next ">".
func (z *Tokenizer) readUntilCloseAngle() {
	z.data.start = z.raw.end
			z.data.end = z.raw.end
			z.data.end = z.raw.end - len(">")
// readMarkupDeclaration reads the next token starting with "<!". It might be
// a "<!--comment-->", a "<!DOCTYPE foo>", a "<![CDATA[section]]>" or
// "<!a bogus comment". The opening "<!" has already been consumed.
func (z *Tokenizer) readMarkupDeclaration() TokenType {
	z.data.start = z.raw.end
	for i := 0; i < 2; i++ {
			z.data.end = z.raw.end
	if c[0] == '-' && c[1] == '-' {
	if z.allowCDATA && z.readCDATA() {
	// It's a bogus comment.
	z.readUntilCloseAngle()
// readDoctype attempts to read a doctype declaration and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readDoctype() bool {
	const s = "DOCTYPE"
	for i := 0; i < len(s); i++ {
			z.data.end = z.raw.end
		if c != s[i] && c != s[i]+('a'-'A') {
			// Back up to read the fragment of "DOCTYPE" again.
			z.raw.end = z.data.start
	if z.skipWhiteSpace(); z.err != nil {
		z.data.start = z.raw.end
		z.data.end = z.raw.end
	z.readUntilCloseAngle()
// readCDATA attempts to read a CDATA section and returns true if
// successful. The opening "<!" has already been consumed.
func (z *Tokenizer) readCDATA() bool {
	const s = "[CDATA["
	for i := 0; i < len(s); i++ {
			z.data.end = z.raw.end
			// Back up to read the fragment of "[CDATA[" again.
			z.raw.end = z.data.start
	z.data.start = z.raw.end
			z.data.end = z.raw.end
			z.data.end = z.raw.end - len("]]>")
// startTagIn returns whether the start tag in z.buf[z.data.start:z.data.end]
// case-insensitively matches any element of ss.
func (z *Tokenizer) startTagIn(ss ...string) bool {
	for _, s := range ss {
		if z.data.end-z.data.start != len(s) {
		for i := 0; i < len(s); i++ {
			c := z.buf[z.data.start+i]
			if 'A' <= c && c <= 'Z' {
// readStartTag reads the next start tag token. The opening "<a" has already
// been consumed, where 'a' means anything in [A-Za-z].
func (z *Tokenizer) readStartTag() TokenType {
	// Several tags flag the tokenizer's next token as raw.
	c, raw := z.buf[z.data.start], false
	if 'A' <= c && c <= 'Z' {
		raw = z.startTagIn("iframe")
		raw = z.startTagIn("noembed", "noframes", "noscript")
		raw = z.startTagIn("plaintext")
		raw = z.startTagIn("script", "style")
		raw = z.startTagIn("textarea", "title")
		raw = z.startTagIn("xmp")
	z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
	// Look for a self-closing token like "<br/>".
	if z.err == nil && z.buf[z.raw.end-2] == '/' {
		return SelfClosingTagToken
// readTag reads the next tag token and its attributes. If saveAttr, those
// attributes are saved in z.attr, otherwise z.attr is set to an empty slice.
// The opening "<a" or "</a" has already been consumed, where 'a' means anything
// in [A-Za-z].
func (z *Tokenizer) readTag(saveAttr bool) {
	// Read the tag name and attribute key/value pairs.
	if z.skipWhiteSpace(); z.err != nil {
		if z.err != nil || c == '>' {
		// Save pendingAttr if saveAttr and that attribute has a non-empty key.
		if saveAttr && z.pendingAttr[0].start != z.pendingAttr[0].end {
			z.attr = append(z.attr, z.pendingAttr)
		if z.skipWhiteSpace(); z.err != nil {
// readTagName sets z.data to the "div" in "<div k=v>". The reader (z.raw.end)
// is positioned such that the first byte of the tag name (the "d" in "<div")
// has already been consumed.
func (z *Tokenizer) readTagName() {
	z.data.start = z.raw.end - 1
			z.data.end = z.raw.end
		case ' ', '\n', '\r', '\t', '\f':
			z.data.end = z.raw.end - 1
			z.data.end = z.raw.end
// readTagAttrKey sets z.pendingAttr[0] to the "k" in "<div k=v>".
// Precondition: z.err == nil.
func (z *Tokenizer) readTagAttrKey() {
	z.pendingAttr[0].start = z.raw.end
			z.pendingAttr[0].end = z.raw.end
		case ' ', '\n', '\r', '\t', '\f', '/':
			z.pendingAttr[0].end = z.raw.end - 1
			z.pendingAttr[0].end = z.raw.end
// readTagAttrVal sets z.pendingAttr[1] to the "v" in "<div k=v>".
func (z *Tokenizer) readTagAttrVal() {
	z.pendingAttr[1].start = z.raw.end
	z.pendingAttr[1].end = z.raw.end
	if z.skipWhiteSpace(); z.err != nil {
	if z.skipWhiteSpace(); z.err != nil {
	quote := z.readByte()
		z.pendingAttr[1].start = z.raw.end
				z.pendingAttr[1].end = z.raw.end
				z.pendingAttr[1].end = z.raw.end - 1
		z.pendingAttr[1].start = z.raw.end - 1
				z.pendingAttr[1].end = z.raw.end
			case ' ', '\n', '\r', '\t', '\f':
				z.pendingAttr[1].end = z.raw.end - 1
				z.pendingAttr[1].end = z.raw.end
// Next scans the next token and returns its type.
func (z *Tokenizer) Next() TokenType {
	z.raw.start = z.raw.end
	z.data.start = z.raw.end
	z.data.end = z.raw.end
		if z.rawTag == "plaintext" {
			// Read everything up to EOF.
			z.data.end = z.raw.end
		if z.data.end > z.data.start {

		// Check if the '<' we have just read is part of a tag, comment
		// or doctype. If not, it's part of the accumulated text token.
		var tokenType TokenType
		case 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z':
			tokenType = StartTagToken
			tokenType = EndTagToken
		case c == '!' || c == '?':
			// We use CommentToken to mean any of "<!--actual comments-->",
			// "<!DOCTYPE declarations>" and "<?xml processing instructions?>".
			tokenType = CommentToken
			// Reconsume the current character.

		// We have a non-text token, but we might have accumulated some text
		// before that. If so, we return the text first, and return the non-
		// text token on the subsequent call to Next.
		if x := z.raw.end - len("<a"); z.raw.start < x {
			z.tt = z.readStartTag()
			// "</>" does not generate a token at all. Generate an empty comment
			// to allow passthrough clients to pick up the data using Raw.
			// Reset the tokenizer state and start again.
			if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' {
			z.readUntilCloseAngle()
			z.tt = z.readMarkupDeclaration()
			z.readUntilCloseAngle()
	if z.raw.start < z.raw.end {
		z.data.end = z.raw.end
// Raw returns the unmodified text of the current token. Calling Next, Token,
// Text, TagName or TagAttr may change the contents of the returned slice.
func (z *Tokenizer) Raw() []byte {
	return z.buf[z.raw.start:z.raw.end]
}
// convertNewlines converts "\r" and "\r\n" in s to "\n".
// The conversion happens in place, but the resulting slice may be shorter.
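//
// For example (a sketch; the input is chosen for illustration):
//
//	convertNewlines([]byte("a\rb\r\nc")) // returns []byte("a\nb\nc")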
func convertNewlines(s []byte) []byte {
	for i, c := range s {
		if src >= len(s) || s[src] != '\n' {
			if src+1 < len(s) && s[src+1] == '\n' {
var (
	nul         = []byte("\x00")
	replacement = []byte("\ufffd")
)
// Text returns the unescaped text of a text, comment or doctype token. The
// contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) Text() []byte {
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		s := z.buf[z.data.start:z.data.end]
		z.data.start = z.raw.end
		z.data.end = z.raw.end
		s = convertNewlines(s)
		if (z.convertNUL || z.tt == CommentToken) && bytes.Contains(s, nul) {
			s = bytes.Replace(s, nul, replacement, -1)
		}
		if !z.textIsRaw {
			s = unescape(s, false)
		}
		return s
	}
	return nil
}
// TagName returns the lower-cased name of a tag token (the `img` out of
// `<IMG SRC="foo">`) and whether the tag has attributes.
// The contents of the returned slice may change on the next call to Next.
func (z *Tokenizer) TagName() (name []byte, hasAttr bool) {
	if z.data.start < z.data.end {
		switch z.tt {
		case StartTagToken, EndTagToken, SelfClosingTagToken:
			s := z.buf[z.data.start:z.data.end]
			z.data.start = z.raw.end
			z.data.end = z.raw.end
			return lower(s), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, false
}
// TagAttr returns the lower-cased key and unescaped value of the next unparsed
// attribute for the current tag token and whether there are more attributes.
// The contents of the returned slices may change on the next call to Next.
func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
	if z.nAttrReturned < len(z.attr) {
		switch z.tt {
		case StartTagToken, SelfClosingTagToken:
			x := z.attr[z.nAttrReturned]
			z.nAttrReturned++
			key = z.buf[x[0].start:x[0].end]
			val = z.buf[x[1].start:x[1].end]
			return lower(key), unescape(convertNewlines(val), true), z.nAttrReturned < len(z.attr)
		}
	}
	return nil, nil, false
}
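// For example, a minimal sketch of draining a tag's attributes without
// allocating a Token (process is a hypothetical callback):
//
//	if _, hasAttr := z.TagName(); hasAttr {
//		for {
//			key, val, more := z.TagAttr()
//			process(key, val)
//			if !more {
//				break
//			}
//		}
//	}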
// Token returns the next Token. The result's Data and Attr values remain valid
// after subsequent Next calls.
func (z *Tokenizer) Token() Token {
	t := Token{Type: z.tt}
	switch z.tt {
	case TextToken, CommentToken, DoctypeToken:
		t.Data = string(z.Text())
	case StartTagToken, SelfClosingTagToken, EndTagToken:
		name, moreAttr := z.TagName()
		for moreAttr {
			var key, val []byte
			key, val, moreAttr = z.TagAttr()
			t.Attr = append(t.Attr, Attribute{"", atom.String(key), string(val)})
		}
		if a := atom.Lookup(name); a != 0 {
			t.DataAtom, t.Data = a, a.String()
		} else {
			t.DataAtom, t.Data = 0, string(name)
		}
	}
	return t
}
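// For example, a minimal sketch of a tokenization loop that inspects anchor
// tokens (the caller stops on ErrorToken):
//
//	z := NewTokenizer(r)
//	for {
//		if z.Next() == ErrorToken {
//			break // z.Err() is io.EOF at a clean end of input
//		}
//		if t := z.Token(); t.DataAtom == atom.A {
//			// t.Data and t.Attr remain valid after further Next calls.
//		}
//	}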
// SetMaxBuf sets a limit on the amount of data buffered during tokenization.
// A value of 0 means unlimited.
func (z *Tokenizer) SetMaxBuf(n int) {
	z.maxBuf = n
}
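// For example, a sketch of bounding memory on untrusted input (the 4096 here
// is an arbitrary illustrative limit):
//
//	z := NewTokenizer(r)
//	z.SetMaxBuf(4096)
//	for z.Next() != ErrorToken {
//	}
//	if z.Err() == ErrBufferExceeded {
//		// A single token needed more than 4096 bytes of buffering.
//	}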
// NewTokenizer returns a new HTML Tokenizer for the given Reader.
// The input is assumed to be UTF-8 encoded.
func NewTokenizer(r io.Reader) *Tokenizer {
	return NewTokenizerFragment(r, "")
}
// NewTokenizerFragment returns a new HTML Tokenizer for the given Reader, for
// tokenizing an existing element's InnerHTML fragment. contextTag is that
// element's tag, such as "div" or "iframe".
//
// For example, how the InnerHTML "a<b" is tokenized depends on whether it is
// for a <p> tag or a <script> tag.
//
// The input is assumed to be UTF-8 encoded.
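//
// A sketch of the "script" case (behavior stated informally; inside raw
// text, "<b" is not treated as the start of an element):
//
//	z := NewTokenizerFragment(strings.NewReader("a<b"), "script")
//	z.Next() // a single text token whose Raw bytes are "a<b"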
func NewTokenizerFragment(r io.Reader, contextTag string) *Tokenizer {
	z := &Tokenizer{
		r:   r,
		buf: make([]byte, 0, 4096),
	}
	if contextTag != "" {
		switch s := strings.ToLower(contextTag); s {
		case "iframe", "noembed", "noframes", "noscript", "plaintext", "script", "style", "title", "textarea", "xmp":
			z.rawTag = s
		}
	}
	return z
}