// Copyright 2017 The Gitea Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package markup

import (
	"bytes"
	"net/url"
	"path"
	"path/filepath"
	"regexp"
	"strings"

	"code.gitea.io/gitea/modules/base"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/util"

	"github.com/Unknwon/com"
	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
	"mvdan.cc/xurls/v2"
)
// Issue name styles
const (
	IssueNameStyleNumeric      = "numeric"
	IssueNameStyleAlphanumeric = "alphanumeric"
)
var (
	// NOTE: None of the regexes below perform any extra validation.
	// Thus a link is produced even if the linked entity does not exist.
	// While fast, this is also incorrect and leads to false positives.
	// TODO: fix invalid linking issue

	// mentionPattern matches all mentions in the form of "@user"
	mentionPattern = regexp.MustCompile(`(?:\s|^|\(|\[)(@[0-9a-zA-Z-_\.]+)(?:\s|$|\)|\])`)
	// issueNumericPattern matches a string that references a numeric issue, e.g. #1287
	issueNumericPattern = regexp.MustCompile(`(?:\s|^|\(|\[)(#[0-9]+)(?:\s|$|\)|\]|\.(\s|$))`)
	// issueAlphanumericPattern matches a string that references an alphanumeric issue, e.g. ABC-1234
	issueAlphanumericPattern = regexp.MustCompile(`(?:\s|^|\(|\[)([A-Z]{1,10}-[1-9][0-9]*)(?:\s|$|\)|\]|\.(\s|$))`)
	// crossReferenceIssueNumericPattern matches a string that references a numeric issue in a different repository,
	// e.g. gogits/gogs#12345
	crossReferenceIssueNumericPattern = regexp.MustCompile(`(?:\s|^|\(|\[)([0-9a-zA-Z-_\.]+/[0-9a-zA-Z-_\.]+#[0-9]+)(?:\s|$|\)|\]|\.(\s|$))`)
	// sha1CurrentPattern matches a string that represents a commit SHA, e.g. d8a994ef243349f321568f9e36d5c3f444b99cae
	// Although SHA1 hashes are 40 chars long, the regex matches hashes from 7 to 40 chars in length
	// so that abbreviated hash links can be used as well. This matches git and GitHub usability.
	sha1CurrentPattern = regexp.MustCompile(`(?:\s|^|\(|\[)([0-9a-f]{7,40})(?:\s|$|\)|\]|\.(\s|$))`)
	// shortLinkPattern matches the short but difficult to parse [[name|link|arg=test]] syntax
	shortLinkPattern = regexp.MustCompile(`\[\[(.*?)\]\](\w*)`)
	// anySHA1Pattern splits a URL that contains a SHA into its parts
	anySHA1Pattern = regexp.MustCompile(`https?://(?:\S+/){4}([0-9a-f]{40})(/[^#\s]+)?(#\S+)?`)
	validLinksPattern = regexp.MustCompile(`^[a-z][\w-]+://`)
	// While this email regex is definitely not perfect and I'm sure you can come up
	// with edge cases, it is still accepted by the CommonMark specification, as
	// well as the HTML5 spec:
	// http://spec.commonmark.org/0.28/#email-address
	// https://html.spec.whatwg.org/multipage/input.html#e-mail-state-(type%3Demail)
	emailRegex = regexp.MustCompile("(?:\\s|^|\\(|\\[)([a-zA-Z0-9.!#$%&'*+\\/=?^_`{|}~-]+@[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\\.[a-zA-Z0-9]{2,}(?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+)(?:\\s|$|\\)|\\]|\\.(\\s|$))")
	linkRegex, _ = xurls.StrictMatchingScheme("https?://")
)

// regexp for full links to issues/pulls
var issueFullPattern *regexp.Regexp

// IsLink reports whether link fits valid format.
func IsLink(link []byte) bool {
	return isLink(link)
}

// isLink reports whether link fits valid format.
func isLink(link []byte) bool {
	return validLinksPattern.Match(link)
}
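
// isLinkStr is the string variant of isLink: it reports whether the given
// string starts with a URL scheme such as "https://".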
func isLinkStr(link string) bool {
	return validLinksPattern.MatchString(link)
}
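
// getIssueFullPattern lazily builds and caches the regexp that matches full
// issue/pull URLs on this instance (derived from setting.AppURL), e.g.
// http://localhost:3000/gituser/myrepo/issues/1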
func getIssueFullPattern() *regexp.Regexp {
	if issueFullPattern == nil {
		appURL := setting.AppURL
		if len(appURL) > 0 && appURL[len(appURL)-1] != '/' {
			appURL += "/"
		}
		issueFullPattern = regexp.MustCompile(appURL +
			`\w+/\w+/(?:issues|pulls)/((?:\w{1,10}-)?[1-9][0-9]*)([\?|#]\S+.(\S+)?)?\b`)
	}
	return issueFullPattern
}

// FindAllMentions matches mention patterns in given content
// and returns a list of found user names without @ prefix.
func FindAllMentions(content string) []string {
	mentions := mentionPattern.FindAllStringSubmatch(content, -1)
	ret := make([]string, len(mentions))
	for i, val := range mentions {
		ret[i] = val[1][1:]
	}
	return ret
}

// IsSameDomain checks if given url string has the same hostname as current Gitea instance
func IsSameDomain(s string) bool {
	if strings.HasPrefix(s, "/") {
		return true
	}
	if uapp, err := url.Parse(setting.AppURL); err == nil {
		if u, err := url.Parse(s); err == nil {
			return u.Host == uapp.Host
		}
		return false
	}
	return false
}
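
// postProcessError wraps an underlying error together with a short
// description of the post-processing step that failed.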
type postProcessError struct {
	context string
	err     error
}

func (p *postProcessError) Error() string {
	return "PostProcess: " + p.context + ", " + p.err.Error()
}
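
// processor is a single post-processing step: it inspects a text node and may
// replace part of its content with generated HTML (links, mentions, ...).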
type processor func(ctx *postProcessCtx, node *html.Node)
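
// defaultProcessors is the full processor set; it is used by PostProcess and
// as a fallback whenever a context has no processors configured.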
var defaultProcessors = []processor{
	fullIssuePatternProcessor,
	fullSha1PatternProcessor,
	shortLinkProcessor,
	linkProcessor,
	mentionProcessor,
	issueIndexPatternProcessor,
	crossReferenceIssueIndexPatternProcessor,
	sha1CurrentPatternProcessor,
	emailAddressProcessor,
}
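
// postProcessCtx holds the state shared by the processors while walking a
// single document: the repository metas, the URL prefix used to resolve
// relative links, and whether the content comes from a wiki page.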
type postProcessCtx struct {
	metas          map[string]string
	urlPrefix      string
	isWikiMarkdown bool

	// processors used by this context.
	procs []processor
}

// PostProcess does the final required transformations to the passed raw HTML
// data, and ensures its validity. Transformations include: replacing links and
// emails with HTML links, parsing shortlinks in the format of [[Link]], like
// MediaWiki, linking issues in the format #ID, mentions in the format @user,
// and others.
func PostProcess(
	rawHTML []byte,
	urlPrefix string,
	metas map[string]string,
	isWikiMarkdown bool,
) ([]byte, error) {
	// create the context from the parameters
	ctx := &postProcessCtx{
		metas:          metas,
		urlPrefix:      urlPrefix,
		isWikiMarkdown: isWikiMarkdown,
		procs:          defaultProcessors,
	}
	return ctx.postProcess(rawHTML)
}
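
// commitMessageProcessors is the processor set used by RenderCommitMessage:
// defaultProcessors without shortLinkProcessor.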
var commitMessageProcessors = []processor{
	fullIssuePatternProcessor,
	fullSha1PatternProcessor,
	linkProcessor,
	mentionProcessor,
	issueIndexPatternProcessor,
	crossReferenceIssueIndexPatternProcessor,
	sha1CurrentPatternProcessor,
	emailAddressProcessor,
}

// RenderCommitMessage will use the same logic as PostProcess, but will disable
// the shortLinkProcessor and will add a defaultLinkProcessor if defaultLink is
// set, which changes every text node into a link to the passed default link.
func RenderCommitMessage(
	rawHTML []byte,
	urlPrefix, defaultLink string,
	metas map[string]string,
) ([]byte, error) {
	ctx := &postProcessCtx{
		metas:     metas,
		urlPrefix: urlPrefix,
		procs:     commitMessageProcessors,
	}
	if defaultLink != "" {
		// We don't have to fear data races: commitMessageProcessors has fixed
		// len and cap, so appending to it always reallocates and copies into a
		// brand-new slice, leaving the shared one untouched.
		ctx.procs = append(ctx.procs, genDefaultLinkProcessor(defaultLink))
	}
	return ctx.postProcess(rawHTML)
}

// RenderDescriptionHTML will use similar logic as PostProcess, but will
// use a single special linkProcessor.
func RenderDescriptionHTML(
	rawHTML []byte,
	urlPrefix string,
	metas map[string]string,
) ([]byte, error) {
	ctx := &postProcessCtx{
		metas:     metas,
		urlPrefix: urlPrefix,
		procs: []processor{
			descriptionLinkProcessor,
		},
	}
	return ctx.postProcess(rawHTML)
}

var byteBodyTag = []byte("<body>")
var byteBodyTagClosing = []byte("</body>")
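
// postProcess wraps rawHTML in a <body> element, parses it, runs ctx.procs on
// every text node, renders the tree back to HTML and strips the wrapper
// element again before returning the result.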
func (ctx *postProcessCtx) postProcess(rawHTML []byte) ([]byte, error) {
	if ctx.procs == nil {
		ctx.procs = defaultProcessors
	}

	// give a generous extra 50 bytes
	res := make([]byte, 0, len(rawHTML)+50)
	res = append(res, byteBodyTag...)
	res = append(res, rawHTML...)
	res = append(res, byteBodyTagClosing...)

	// parse the HTML
	nodes, err := html.ParseFragment(bytes.NewReader(res), nil)
	if err != nil {
		return nil, &postProcessError{"invalid HTML", err}
	}

	for _, node := range nodes {
		ctx.visitNode(node)
	}

	// Create buffer in which the data will be placed again. We know that the
	// length will be at least that of res; to spare a few alloc+copy, we
	// reuse res, resetting its length to 0.
	buf := bytes.NewBuffer(res[:0])
	// Render everything to buf.
	for _, node := range nodes {
		err = html.Render(buf, node)
		if err != nil {
			return nil, &postProcessError{"error rendering processed HTML", err}
		}
	}

	// remove initial parts - because Render creates a whole HTML page.
	res = buf.Bytes()
	res = res[bytes.Index(res, byteBodyTag)+len(byteBodyTag) : bytes.LastIndex(res, byteBodyTagClosing)]

	// Everything done successfully, return parsed data.
	return res, nil
}
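
// visitNode recursively walks the parsed tree, running the processors on text
// nodes while skipping <a>, <code> and <pre> elements entirely.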
func (ctx *postProcessCtx) visitNode(node *html.Node) {
	// We ignore code, pre and already generated links.
	switch node.Type {
	case html.TextNode:
		ctx.textNode(node)
	case html.ElementNode:
		if node.Data == "a" || node.Data == "code" || node.Data == "pre" {
			return
		}
		for n := node.FirstChild; n != nil; n = n.NextSibling {
			ctx.visitNode(n)
		}
	}
	// ignore everything else
}

// textNode runs the passed node through various processors, in order to handle
// all kinds of special links handled by the post-processing.
func (ctx *postProcessCtx) textNode(node *html.Node) {
	for _, processor := range ctx.procs {
		processor(ctx, node)
	}
}
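
// createLink builds an <a href="..."> element whose only child is a text node
// containing content.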
func createLink(href, content string) *html.Node {
	a := &html.Node{
		Type: html.ElementNode,
		Data: atom.A.String(),
		Attr: []html.Attribute{{Key: "href", Val: href}},
	}

	text := &html.Node{
		Type: html.TextNode,
		Data: content,
	}

	a.AppendChild(text)
	return a
}
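
// createCodeLink is like createLink, but wraps the link text in a <code>
// element; it is used for commit SHA links.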
func createCodeLink(href, content string) *html.Node {
	a := &html.Node{
		Type: html.ElementNode,
		Data: atom.A.String(),
		Attr: []html.Attribute{{Key: "href", Val: href}},
	}

	text := &html.Node{
		Type: html.TextNode,
		Data: content,
	}

	code := &html.Node{
		Type: html.ElementNode,
		Data: atom.Code.String(),
	}

	code.AppendChild(text)
	a.AppendChild(code)
	return a
}

// replaceContent takes a text node and replaces the section of its content
// between indices i and j with the specified newNode. An example to visualize
// how this works can be found here: https://play.golang.org/p/5zP8NnHZ03s
func replaceContent(node *html.Node, i, j int, newNode *html.Node) {
	// get the data before and after the match
	before := node.Data[:i]
	after := node.Data[j:]

	// Replace in the current node the text, so that it is only what it is
	// supposed to have.
	node.Data = before

	// Get the current next sibling, before which we place the replaced data,
	// and after that we place the new text node.
	nextSibling := node.NextSibling
	node.Parent.InsertBefore(newNode, nextSibling)
	if after != "" {
		node.Parent.InsertBefore(&html.Node{
			Type: html.TextNode,
			Data: after,
		}, nextSibling)
	}
}
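
// mentionProcessor replaces the first "@user" mention in the node with a link
// to that user's profile on this instance.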
func mentionProcessor(_ *postProcessCtx, node *html.Node) {
	m := mentionPattern.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}
	// Replace the mention with a link to the specified user.
	mention := node.Data[m[2]:m[3]]
	replaceContent(node, m[2], m[3], createLink(util.URLJoin(setting.AppURL, mention[1:]), mention))
}
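
// shortLinkProcessor handles the [[name|link|arg=val]] shortlink syntax,
// replacing it with a link or an inline image.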
func shortLinkProcessor(ctx *postProcessCtx, node *html.Node) {
	shortLinkProcessorFull(ctx, node, false)
}
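
// shortLinkProcessorFull does the actual shortlink work; when noLink is true,
// the generated content is inserted without being wrapped in a link.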
func shortLinkProcessorFull(ctx *postProcessCtx, node *html.Node, noLink bool) {
	m := shortLinkPattern.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}

	content := node.Data[m[2]:m[3]]
	tail := node.Data[m[4]:m[5]]
	props := make(map[string]string)

	// MediaWiki uses [[link|text]], while GitHub uses [[text|link]].
	// This makes page handling terrible, but we prefer GitHub syntax and fall
	// back to MediaWiki only when it is obvious from the contents of text and
	// link which one is meant.
	sl := strings.Split(content, "|")
	for _, v := range sl {
		if equalPos := strings.IndexByte(v, '='); equalPos == -1 {
			// There is no equals sign in this argument; this is a mandatory arg
			if props["name"] == "" {
				if isLinkStr(v) {
					// If it clearly looks like a link, save it as such. But
					// first make sure that if both mandatory args look like
					// links, we stick to the GitHub syntax.
					if props["link"] != "" {
						props["name"] = props["link"]
					}
					props["link"] = strings.TrimSpace(v)
				} else {
					props["name"] = v
				}
			} else {
				props["link"] = strings.TrimSpace(v)
			}
		} else {
			// There is an equals sign; this is an optional argument.
			key, val := v[:equalPos], html.UnescapeString(v[equalPos+1:])
			// When parsing HTML, x/net/html will change all quotes which are
			// not used for syntax into UTF-8 quotes. So checking val[0] won't
			// be enough, since that only checks a single byte.
			if (strings.HasPrefix(val, "“") && strings.HasSuffix(val, "”")) ||
				(strings.HasPrefix(val, "‘") && strings.HasSuffix(val, "’")) {
				const lenQuote = len("‘")
				val = val[lenQuote : len(val)-lenQuote]
			}
			props[key] = val
		}
	}

	var name, link string
	if props["link"] != "" {
		link = props["link"]
	} else if props["name"] != "" {
		link = props["name"]
	}
	if props["title"] != "" {
		name = props["title"]
	} else if props["name"] != "" {
		name = props["name"]
	} else {
		name = link
	}

	name += tail
	image := false
	switch ext := filepath.Ext(link); ext {
	// fast path: empty string, ignore
	case "":
		break
	case ".jpg", ".jpeg", ".png", ".tif", ".tiff", ".webp", ".gif", ".bmp", ".ico", ".svg":
		image = true
	}

	childNode := &html.Node{}
	linkNode := &html.Node{
		FirstChild: childNode,
		LastChild:  childNode,
		Type:       html.ElementNode,
		Data:       "a",
		DataAtom:   atom.A,
	}
	childNode.Parent = linkNode
	absoluteLink := isLinkStr(link)
	if !absoluteLink {
		if image {
			link = strings.Replace(link, " ", "+", -1)
		} else {
			link = strings.Replace(link, " ", "-", -1)
		}
		if !strings.Contains(link, "/") {
			link = url.PathEscape(link)
		}
	}
	urlPrefix := ctx.urlPrefix
	if image {
		if !absoluteLink {
			if IsSameDomain(urlPrefix) {
				urlPrefix = strings.Replace(urlPrefix, "/src/", "/raw/", 1)
			}
			if ctx.isWikiMarkdown {
				link = util.URLJoin("wiki", "raw", link)
			}
			link = util.URLJoin(urlPrefix, link)
		}
		title := props["title"]
		if title == "" {
			title = props["alt"]
		}
		if title == "" {
			title = path.Base(name)
		}
		alt := props["alt"]
		if alt == "" {
			alt = name
		}

		// make the childNode an image - if we can, we also place the alt
		childNode.Type = html.ElementNode
		childNode.Data = "img"
		childNode.DataAtom = atom.Img
		childNode.Attr = []html.Attribute{
			{Key: "src", Val: link},
			{Key: "title", Val: title},
			{Key: "alt", Val: alt},
		}
		if alt == "" {
			childNode.Attr = childNode.Attr[:2]
		}
	} else {
		if !absoluteLink {
			if ctx.isWikiMarkdown {
				link = util.URLJoin("wiki", link)
			}
			link = util.URLJoin(urlPrefix, link)
		}
		childNode.Type = html.TextNode
		childNode.Data = name
	}
	if noLink {
		linkNode = childNode
	} else {
		linkNode.Attr = []html.Attribute{{Key: "href", Val: link}}
	}
	replaceContent(node, m[0], m[1], linkNode)
}
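
// fullIssuePatternProcessor replaces full issue/pull URLs that point at this
// instance with a short "#1234" style link, prefixed with "owner/repo" when
// the URL refers to a repository other than the current one.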
func fullIssuePatternProcessor(ctx *postProcessCtx, node *html.Node) {
	if ctx.metas == nil {
		return
	}
	m := getIssueFullPattern().FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}
	link := node.Data[m[0]:m[1]]
	id := "#" + node.Data[m[2]:m[3]]

	// extract repo and org name from matched link like
	// http://localhost:3000/gituser/myrepo/issues/1
	linkParts := strings.Split(path.Clean(link), "/")
	matchOrg := linkParts[len(linkParts)-4]
	matchRepo := linkParts[len(linkParts)-3]

	if matchOrg == ctx.metas["user"] && matchRepo == ctx.metas["repo"] {
		// TODO if m[4]:m[5] is not nil, then link is to a comment,
		// and we should indicate that in the text somehow
		replaceContent(node, m[0], m[1], createLink(link, id))
	} else {
		orgRepoID := matchOrg + "/" + matchRepo + id
		replaceContent(node, m[0], m[1], createLink(link, orgRepoID))
	}
}
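
// issueIndexPatternProcessor links bare issue references such as "#1287" (or
// "ABC-1234" when the alphanumeric style is configured). When the "format"
// meta is present, the link is expanded for an external issue tracker.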
func issueIndexPatternProcessor(ctx *postProcessCtx, node *html.Node) {
	if ctx.metas == nil {
		return
	}
	// default to numeric pattern, unless alphanumeric is requested.
	pattern := issueNumericPattern
	if ctx.metas["style"] == IssueNameStyleAlphanumeric {
		pattern = issueAlphanumericPattern
	}

	match := pattern.FindStringSubmatchIndex(node.Data)
	if match == nil {
		return
	}
	id := node.Data[match[2]:match[3]]
	var link *html.Node
	if _, ok := ctx.metas["format"]; ok {
		// Support for external issue tracker
		if ctx.metas["style"] == IssueNameStyleAlphanumeric {
			ctx.metas["index"] = id
		} else {
			ctx.metas["index"] = id[1:]
		}
		link = createLink(com.Expand(ctx.metas["format"], ctx.metas), id)
	} else {
		link = createLink(util.URLJoin(setting.AppURL, ctx.metas["user"], ctx.metas["repo"], "issues", id[1:]), id)
	}
	replaceContent(node, match[2], match[3], link)
}
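
// crossReferenceIssueIndexPatternProcessor links cross-repository references
// of the form "owner/repo#1234" to the issue in that repository.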
func crossReferenceIssueIndexPatternProcessor(ctx *postProcessCtx, node *html.Node) {
	m := crossReferenceIssueNumericPattern.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}
	ref := node.Data[m[2]:m[3]]

	parts := strings.SplitN(ref, "#", 2)
	repo, issue := parts[0], parts[1]

	replaceContent(node, m[2], m[3],
		createLink(util.URLJoin(setting.AppURL, repo, "issues", issue), ref))
}

// fullSha1PatternProcessor renders URLs that contain a commit SHA
func fullSha1PatternProcessor(ctx *postProcessCtx, node *html.Node) {
	if ctx.metas == nil {
		return
	}
	m := anySHA1Pattern.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}

	urlFull := node.Data[m[0]:m[1]]
	text := base.ShortSha(node.Data[m[2]:m[3]])

	// the second capture group matches an optional sub-path
	subpath := ""
	if m[5] > 0 {
		subpath = node.Data[m[4]:m[5]]
	}

	// the third capture group matches an optional URL fragment (hash)
	hash := ""
	if m[7] > 0 {
		hash = node.Data[m[6]:m[7]][1:]
	}

	start := m[0]
	end := m[1]

	// If the URL ends in '.', it is very likely that the dot is not part of
	// the actual URL but is used to finish a sentence.
	if strings.HasSuffix(urlFull, ".") {
		end--
		urlFull = urlFull[:len(urlFull)-1]
		if hash != "" {
			hash = hash[:len(hash)-1]
		} else if subpath != "" {
			subpath = subpath[:len(subpath)-1]
		}
	}

	if subpath != "" {
		text += subpath
	}

	if hash != "" {
		text += " (" + hash + ")"
	}

	replaceContent(node, start, end, createCodeLink(urlFull, text))
}

// sha1CurrentPatternProcessor renders SHA1 strings to corresponding links that
// are assumed to be in the same repository.
func sha1CurrentPatternProcessor(ctx *postProcessCtx, node *html.Node) {
	m := sha1CurrentPattern.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}
	hash := node.Data[m[2]:m[3]]
	// The regex does not lie, it matches the hash pattern.
	// However, a regex cannot know if a hash actually exists or not.
	// We could assume that a SHA1 hash should probably contain alphas AND numerics,
	// but that is not always the case.
	// Although unlikely, "deadbeef" and "1234567" are valid short forms of a SHA1
	// hash as used by git and GitHub for linking, so we have to do the same.
	replaceContent(node, m[2], m[3],
		createCodeLink(util.URLJoin(setting.AppURL, ctx.metas["user"], ctx.metas["repo"], "commit", hash), base.ShortSha(hash)))
}

// emailAddressProcessor replaces raw email addresses with a mailto: link.
func emailAddressProcessor(ctx *postProcessCtx, node *html.Node) {
	m := emailRegex.FindStringSubmatchIndex(node.Data)
	if m == nil {
		return
	}
	mail := node.Data[m[2]:m[3]]
	replaceContent(node, m[2], m[3], createLink("mailto:"+mail, mail))
}

// linkProcessor creates links for any HTTP or HTTPS URL not captured by
// markdown.
func linkProcessor(ctx *postProcessCtx, node *html.Node) {
	m := linkRegex.FindStringIndex(node.Data)
	if m == nil {
		return
	}
	uri := node.Data[m[0]:m[1]]
	replaceContent(node, m[0], m[1], createLink(uri, uri))
}
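
// genDefaultLinkProcessor returns a processor that turns the whole text node
// into a link pointing at defaultLink, keeping the original text as the label.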
func genDefaultLinkProcessor(defaultLink string) processor {
	return func(ctx *postProcessCtx, node *html.Node) {
		ch := &html.Node{
			Parent: node,
			Type:   html.TextNode,
			Data:   node.Data,
		}

		node.Type = html.ElementNode
		node.Data = "a"
		node.DataAtom = atom.A
		node.Attr = []html.Attribute{{Key: "href", Val: defaultLink}}
		node.FirstChild, node.LastChild = ch, ch
	}
}

// descriptionLinkProcessor creates links for DescriptionHTML
func descriptionLinkProcessor(ctx *postProcessCtx, node *html.Node) {
	m := linkRegex.FindStringIndex(node.Data)
	if m == nil {
		return
	}
	uri := node.Data[m[0]:m[1]]
	replaceContent(node, m[0], m[1], createDescriptionLink(uri, uri))
}
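
// createDescriptionLink is like createLink, but the resulting anchor opens in
// a new tab (target="_blank") and carries rel="noopener noreferrer".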
func createDescriptionLink(href, content string) *html.Node {
	textNode := &html.Node{
		Type: html.TextNode,
		Data: content,
	}
	linkNode := &html.Node{
		FirstChild: textNode,
		LastChild:  textNode,
		Type:       html.ElementNode,
		Data:       "a",
		DataAtom:   atom.A,
		Attr: []html.Attribute{
			{Key: "href", Val: href},
			{Key: "target", Val: "_blank"},
			{Key: "rel", Val: "noopener noreferrer"},
		},
	}
	textNode.Parent = linkNode
	return linkNode
}