diff --git a/Makefile b/Makefile index 0ab243f..8fa2c64 100644 --- a/Makefile +++ b/Makefile @@ -13,8 +13,11 @@ FILEMODE = 444 all: clean build +dev: + go run main.go --content-dir content --template-dir templates --static-dir static --listen "127.0.0.1:8080" + build: - hugo + go run main.go --content-dir content --template-dir templates --static-dir static --output-dir $(HTTPDIR) clean: -rm -r public/* diff --git a/config.toml b/config.toml deleted file mode 100644 index 93bf53d..0000000 --- a/config.toml +++ /dev/null @@ -1,11 +0,0 @@ -baseurl = "" -languageCode = "en-us" -title = "zero-knowledge" -theme = "zero" -SectionPagesMenu = "main" -Paginate = 12 -disableRSS = false - -[taxonomies] - author = "author" - tag = "tags" diff --git a/content/about.md b/content/about.md deleted file mode 100644 index 0cfdd1e..0000000 --- a/content/about.md +++ /dev/null @@ -1,11 +0,0 @@ -+++ -date = "2015-10-11T20:00:29+02:00" -draft = true -title = "about" - -+++ - -## about zero-knowledge - -This blog is the personal blog of Gibheer and Stormwind, where we write about -any topic from IT which keeps us working at the moment. diff --git a/content/author/gibheer.md b/content/author/gibheer.md index 4b73ddd..f44f58f 100644 --- a/content/author/gibheer.md +++ b/content/author/gibheer.md @@ -1,6 +1,7 @@ +++ title = "Gibheer" date = "2015-11-04T12:23:00+02:00" +url = "/author/Gibheer" +++ ## about me @@ -17,8 +18,8 @@ learn from it and try it another way next time. Most of the stuff I try in private are online either on github or my own git server. What isn't code, I try to write down on the blog. -As for social media, I'm on [freenode](irc://irc.freenode.org/) under the nick -Gibheer. +As for social media, I'm on [libera.chat](ircs://irc.libera.chat:6697) with the nick +'Gibheer'. ## links diff --git a/content/author/stormwind.md b/content/author/stormwind.md index 06bde06..85c970a 100644 --- a/content/author/stormwind.md +++ b/content/author/stormwind.md @@ -1,6 +1,7 @@ +++ title = "Stormwind" date = "2015-11-04T12:40:00+02:00" +url = "/author/Stormwind" +++ introduction diff --git a/content/index.md b/content/index.md new file mode 100644 index 0000000..95e8a51 --- /dev/null +++ b/content/index.md @@ -0,0 +1,135 @@ ++++ +title = "blog" +author = "gibheer" +url = "/" +template = "index.html" ++++ + +This blog is maintained by [Gibheer](/author/Gibheer) and [Stormwind](/author/Stormwind) +about various topics. + + * [link summary 2016/07/08](post/127.md) + * [poudriere in jails with zfs](post/126.md) + * [gotchas with IPs and Jails](post/125.md) + * [link summary 2016/04/09](post/124.md) + * [json/curl to go](post/123.md) + * [configuring raids on freebsd](post/122.md) + * [fast application locks](post/121.md) + * [new blog engine](post/120.md) + * [ssh certificates part 2](post/119.md) + * [ssh certificates part 1](post/118.md) + * [S.M.A.R.T. 
values](post/117.md) + * [minimal nginx configuration](post/115.md) + * [pgstats - vmstat like stats for postgres](post/114.md) + * [setting zpool features](post/113.md) + * [using unbound and dnsmasq](post/112.md) + * [common table expressions in postgres](post/111.md) + * [range types in postgres](post/110.md) + * [learning the ansible way](post/109.md) + * [playing with go](post/108.md) + * [no cfengine anymore](post/107.md) + * [scan to samba share with HP Officejet pro 8600](post/106.md) + * [\[cfengine\] log to syslog](post/105.md) + * [overhaul of the blog](post/104.md) + * [block mails for unknown users](post/103.md) + * [choosing a firewall on freebsd](post/102.md) + * [use dovecot to store mails with lmtp](post/100.md) + * [grub can't read zpool](post/99.md) + * [sysidcfg replacement on omnios](post/98.md) + * [filter program logs in freebsd syslog](post/97.md) + * [moving a zone between zpools](post/96.md) + * [compile errors on omnios with llvm](post/95.md) + * [inner and natural joins](post/94.md) + * [release of zero 0.1.0](post/93.md) + * [building a multi instance postgres systemd service](post/92.md) + * [automatic locking of the screen](post/91.md) + * [rotate log files with logadm](post/90.md) + * [Solaris SMF on linux with systemd](post/89.md) + * [create encrypted password for postgresql](post/88.md) + * [extend PATH in Makefile](post/87.md) + * [touchpad keeps scrolling](post/86.md) + * [Schwarze Seelen brauchen bunte Socken 2012.1](post/85.md) + * [Backups with ZFS over the wire](post/84.md) + * [the Illumos eco system](post/83.md) + * [archlinux + rubygems = gem executables will not run](post/82.md) + * [Lustige Gehversuche mit... verschlüsselten Festplatten](post/81.md) + * [find cycle detected](post/80.md) + * [openindiana - getting rubinius to work](post/79.md) + * [openindiana - curl CA failure](post/78.md) + * [openindiana - set up ssh with kerberos authentication](post/77.md) + * [great resource to ipfilter](post/76.md) + * [openindiana - ntpd does not start](post/75.md) + * [openindiana - how to configure a zone](post/74.md) + * [openindiana - how to get routing working](post/73.md) + * [How to use sysidcfg for zone deployment](post/72.md) + * [set environment variables in smf manifests](post/71.md) + * [get pfexec back in Solaris](post/70.md) + * [Solaris - a new way to 'ifconfig'](post/69.md) + * [OpenIndiana 151a released](post/68.md) + * [PostgreSQL 9.1 was released](post/67.md) + * [SmartOS - hype and a demo iso](post/66.md) + * [SmartOS - a new Solaris](post/65.md) + * [neues Lebenszeichen - neuer Blog](post/64.md) + * [Accesslogs in die Datenbank](post/63.md) + * [Schwarze Seelen brauchen bunte Socken - Teil 3](post/62.md) + * [Technik hinter dem neuen Blog](post/61.md) + * [jede Menge Umzuege](post/60.md) + * [DTrace fuer den Linuxlator in FreeBSD](post/59.md) + * [daily zfs snapshots](post/58.md) + * [Dokumentation in Textile schreiben](post/57.md) + * [Shells in anderen Sprachen](post/56.md) + * [ZFS Versionen](post/55.md) + * [Spielwahn mit Wasser](post/54.md) + * [FreeBSD Status Report Juli - September 2010](post/53.md) + * [Spass mit test-driven development](post/52.md) + * [dtrace userland in FreeBSD head](post/51.md) + * [Alle Tabellen einer DB loeschen mit PostgreSQL 9.0](post/50.md) + * [Shellbefehle im Vim ausfuehren](post/49.md) + * [zero-knowledge mit IPv6 Teil 2](post/48.md) + * [[Rubyconf 2009] Worst Ideas Ever](post/47.md) + * [Nachfolger von Tex](post/46.md) + * [Linux und Windows im Auto](post/45.md) + * [zero-knowledge jetzt auch per 
IPv6](post/44.md) + * [Der Drackenzackenschal](post/43.md) + * [Kalender auf der Konsole](post/42.md) + * [NetBeans 6.9 released](post/41.md) + * [Das Wollefest in Nierstein](post/40.md) + * [PostgreSQL - mehrere Werte aus einer Funktion](post/39.md) + * [Schwarze Seelen brauchen bunte Socken - Teil 2](post/38.md) + * [Serverumzug vollendet](post/37.md) + * [MySQL kann Datensaetze \"zerreissen\"](post/36.md) + * [Umzug mit OpenSolaris 20x0.xx](post/35.md) + * [Blub gibt es ab sofort auch fuer unterwegs](post/34.md) + * [OpenSolaris Zones mit statischer IP](post/33.md) + * [Blog nicht da](post/32.md) + * [gefaehrliches Spiel fuer das n900](post/31.md) + * [neuer CLI-Client fuer XMMS2](post/30.md) + * [Claws Mail laeuft auf OpenSolaris](post/29.md) + * [publisher contains only packages from other publisher](post/28.md) + * [PostgreSQL 8.4 in OpenSolaris](post/27.md) + * [mit PHP Mailadressen validieren](post/26.md) + * [Lustige Gehversuche mit ...](post/25.md) + * [Performance, Programme und viel Musik](post/24.md) + * [von Linux zu OpenSolaris](post/23.md) + * [Gibheers zsh-config](post/22.md) + * [Crossbow mit Solaris Containern](post/21.md) + * [Lustige Gehversuche mit Gentoo/FreeBSD](post/20.md) + * [Heidelbeertigerarmstulpen](post/19.md) + * [OpenVPN unter OpenSolaris](post/18.md) + * [OpenSolaris Wiki](post/17.md) + * [OpenSolaris ohne Reboot updaten](post/16.md) + * [einzelne Pakete unter OpenSolaris updaten](post/15.md) + * [Rails mit Problemen unter OpenSolaris](post/14.md) + * [Wie wenig braucht OpenSolaris?](post/13.md) + * [das eklige Gesicht XMLs](post/12.md) + * [Dokumentation fuer (Open)Solaris](post/11.md) + * [Woche der Updates](post/10.md) + * [Was ist XMMS2?](post/9.md) + * [Rack und XMMS2](post/8.md) + * [Webserver unter Ruby](post/7.md) + * [Symbole in Ruby](post/6.md) + * [Schwarze Seelen brauchen bunte Socken](post/5.md) + * [Zero-knowledge spielt wieder Icewars](post/4.md) + * [Serendipity als Blog?](post/3.md) + * [Indizes statt Tabellen](post/2.md) + * [zero-knowledge ohne Forum](post/1.md) diff --git a/content/post/40.md b/content/post/40.md index 6e23f8b..e168171 100644 --- a/content/post/40.md +++ b/content/post/40.md @@ -36,8 +36,8 @@ naja, jetzt brauche ich unbedingt ganz viel Wolle. Hier nochmal ein Dank an Nathalie und ihre Mutter, die beide den Workshop betreut haben. Das hat echt Spaß gemacht und ich denke ich werde auch in Zukunft noch ganz viel zumspinnen. :)\ -!(float\_right)/images/wolle4.jpg(4 Knaeule bunte Wolle vom -Wolldrachen)!\ +![4 Knaeule bunte Wolle vom Wolldrachen](/static/pics/wolle4.jpg) + Desweiteren muss ich erzählen, dass der [Wolldrache](http://drachenwolle.de/) auch hier mit ihrem Stand zu finden war. Und das gemeinerweise direkt am Anfang des Festplatzes. diff --git a/content/post/64.md b/content/post/64.md index d975959..d3f5a56 100644 --- a/content/post/64.md +++ b/content/post/64.md @@ -5,10 +5,10 @@ author = "Gibheer" draft = false +++ -Nachdem es hier lange Still war, gibt es mal wieder ein Update. In der zwischenzeit haben wir den Blog auf eine eigene Software umgezogen, weil uns Jekyll nicht gepasst hat. Fuer mich war es zwar einfach von der Konsole aus die Beitraege zu verfassen, allerdings fehlte die Moeglichkeit auch mal von unterwegs "schnell" etwas zu verfassen. - -Nun haben wir eine eigene Blogsoftware (die auch auf github liegt). Mal schauen wie gut wir damit zurecht kommen. 
Im Gegensatz zu jekyll generieren wir keine statischen Files, sondern der Content wird in der Datenbank gespeichert und bei jedem Request neu generiert. Das ist im Moment noch etwas langsam, aber da werd ich noch was bauen, damit das besser passt. - -Es wird noch eine Kommentarfunktion hinzukommen und es ist geplant unterschiedliche Typen von Blogposts machen zu koennen. Ersteres wird wahrscheinlich recht einfach werden, letztes ist im Moment nur eine grobe Idee in meinem Kopf. - +Nachdem es hier lange Still war, gibt es mal wieder ein Update. In der zwischenzeit haben wir den Blog auf eine eigene Software umgezogen, weil uns Jekyll nicht gepasst hat. Fuer mich war es zwar einfach von der Konsole aus die Beitraege zu verfassen, allerdings fehlte die Moeglichkeit auch mal von unterwegs "schnell" etwas zu verfassen. + +Nun haben wir eine eigene Blogsoftware (die auch auf github liegt). Mal schauen wie gut wir damit zurecht kommen. Im Gegensatz zu jekyll generieren wir keine statischen Files, sondern der Content wird in der Datenbank gespeichert und bei jedem Request neu generiert. Das ist im Moment noch etwas langsam, aber da werd ich noch was bauen, damit das besser passt. + +Es wird noch eine Kommentarfunktion hinzukommen und es ist geplant unterschiedliche Typen von Blogposts machen zu koennen. Ersteres wird wahrscheinlich recht einfach werden, letztes ist im Moment nur eine grobe Idee in meinem Kopf. + Es ist auf jeden Fall ein nettes Experiment und mal schauen, wie es sich in Zukunft weiter entwickeln wird. diff --git a/content/post/65.md b/content/post/65.md index 64a1e0a..01a70a1 100644 --- a/content/post/65.md +++ b/content/post/65.md @@ -5,16 +5,16 @@ author = "Gibheer" draft = false +++ -Some minutes ago I saw on [hacker news](http://news.ycombinator.com/) the following line [Joyent Open Sources SmartOS: Zones, ZFS, DTrace and KVM (smartos.org)](http://smartos.org/). -Who is behind SmartOS? -====================== - -What does that mean? I took a look and it seems, that Joyent, the company behind [node.js](http://nodejs.org/), has released their distribution of [Illumos](https://www.illumos.org/). -After the merge of sun and oracle, OpenSolaris as a project was closed in favor of Solaris11. As OpenSolaris was OpenSource the project Illumos emerged from the remains of OpenSolaris, but there was no release of the Illumos kernel in any project till now. - -So what is different? -===================== - -The first things I saw on their page are dtrace zfs and zones. So it's a standard solaris. But there is more: *KVM*! If the existence of zones means also, that it has crossbow and resource limits, then it would be absolutely gorgeous! It would be possible to build the core services on solaris zones and on top of that multiple dev or production machines with linux, windows or whatever you want. - +Some minutes ago I saw on [hacker news](http://news.ycombinator.com/) the following line [Joyent Open Sources SmartOS: Zones, ZFS, DTrace and KVM (smartos.org)](http://smartos.org/). +Who is behind SmartOS? +====================== + +What does that mean? I took a look and it seems, that Joyent, the company behind [node.js](http://nodejs.org/), has released their distribution of [Illumos](https://www.illumos.org/). +After the merge of sun and oracle, OpenSolaris as a project was closed in favor of Solaris11. As OpenSolaris was OpenSource the project Illumos emerged from the remains of OpenSolaris, but there was no release of the Illumos kernel in any project till now. + +So what is different? 
+===================== + +The first things I saw on their page are dtrace zfs and zones. So it's a standard solaris. But there is more: *KVM*! If the existence of zones means also, that it has crossbow and resource limits, then it would be absolutely gorgeous! It would be possible to build the core services on solaris zones and on top of that multiple dev or production machines with linux, windows or whatever you want. + I will test it first in a virtual box to see, how stable and usable it really is, as there is no documentation on the website yet. After my test I will report back. diff --git a/content/post/66.md b/content/post/66.md index f898e00..875f024 100644 --- a/content/post/66.md +++ b/content/post/66.md @@ -5,12 +5,12 @@ author = "Gibheer" draft = false +++ -So, there is this new distribution of Illumos, [SmartOS](http://smartos.org) but it's not as ready as they claimed. Sure, there is an ISO but that ISO has no installer and no package manager. So one of the crucial part for using SmartOS is missing. - -As Joyent wrote on the [blog](http://blog.smartos.org) they are working on a wiki and the documentation and this night, they showed the [wiki](http://wiki.smartos.org). Until now there is only a documentation on how to use the usb image which got released the same time. But i think, that there will be much more coming. - -At the same time I found out, that kvm was released into the Illumos core too, so that kvm will be available with every other distribution too. And [OpenIndiana](http://openindiana.org) said, they want it in their 151 release too. 151 was planned to be released some months ago, so let's see, how fast they can get that out to the users. - -Joyent too should release a real distribution as fast as they can, because they created a large hype for SmartOS, but have nothing to use it in production. The ports are missing and an upgrade path is missing too. They wrote, that they are already using it in production, so why did they not release that? - +So, there is this new distribution of Illumos, [SmartOS](http://smartos.org) but it's not as ready as they claimed. Sure, there is an ISO but that ISO has no installer and no package manager. So one of the crucial part for using SmartOS is missing. + +As Joyent wrote on the [blog](http://blog.smartos.org) they are working on a wiki and the documentation and this night, they showed the [wiki](http://wiki.smartos.org). Until now there is only a documentation on how to use the usb image which got released the same time. But i think, that there will be much more coming. + +At the same time I found out, that kvm was released into the Illumos core too, so that kvm will be available with every other distribution too. And [OpenIndiana](http://openindiana.org) said, they want it in their 151 release too. 151 was planned to be released some months ago, so let's see, how fast they can get that out to the users. + +Joyent too should release a real distribution as fast as they can, because they created a large hype for SmartOS, but have nothing to use it in production. The ports are missing and an upgrade path is missing too. They wrote, that they are already using it in production, so why did they not release that? + Illumos, OpenIndiana and Joyent with SmartOS are missing a big chance here to make that fork of OpenSolaris popular. They created much traction, but without having something, which could be used in production. We will see, how fast they can react. 
Hopefully, the release of either OpenIndiana or SmartOS will be usable and stable in production. Then they have a chance of getting me as a user.
diff --git a/content/post/67.md b/content/post/67.md
index 1de80eb..6be6061 100644
--- a/content/post/67.md
+++ b/content/post/67.md
@@ -5,6 +5,6 @@ author = "Gibheer"
 draft = false
 +++
-Yesterday PostgreSQL 9.1 was released. It has some neat features included, like writable common table expressions, synchronized replication and unlogged tables. Apart from that, some performance tuning was included as well.
-
+Yesterday PostgreSQL 9.1 was released. It has some neat features included, like writable common table expressions, synchronized replication and unlogged tables. Apart from that, some performance tuning was included as well.
+
 If you are interested, take a look yourself at the [release notes](http://www.postgresql.org/about/news.1349)
diff --git a/content/post/68.md b/content/post/68.md
index b56debe..61d072e 100644
--- a/content/post/68.md
+++ b/content/post/68.md
@@ -5,10 +5,10 @@ author = "Gibheer"
 draft = false
 +++
-After the release of [PostgreSQL 9.1](http://www.postgresql.org/about/news.1349), today another great open source project released a new version - [OpenIndiana](http://wiki.openindiana.org/oi/oi_151a+Release+Notes).
-
-OpenIndiana is based on a fork of OpenSolaris, named [Illumos](http://illumos.org). It was announced in august 2010. OpenIndiana has evolved since that time and got a stable release 148 and today 151a. That release is very solid and got one thing, which Solaris 11 has and most likely will never have: *KVM*.
-
-So from today you get a Solaris fork with crossbow, resource containers, zones and the kernel virtual machine, converted from linux to Illumos from the developers of [Joyent](http://joyent.com). They built there own distribution, [SmartOS](http://smartos.org), which is a bootable OS for managing a cloud like setup but without the zones.
-
+After the release of [PostgreSQL 9.1](http://www.postgresql.org/about/news.1349), today another great open source project released a new version - [OpenIndiana](http://wiki.openindiana.org/oi/oi_151a+Release+Notes).
+
+OpenIndiana is based on a fork of OpenSolaris, named [Illumos](http://illumos.org). It was announced in August 2010. OpenIndiana has evolved since that time and got a stable release 148 and, today, 151a. That release is very solid and got one thing which Solaris 11 does not have and most likely never will: *KVM*.
+
+So from today you get a Solaris fork with crossbow, resource containers, zones and the kernel virtual machine, ported from Linux to Illumos by the developers of [Joyent](http://joyent.com). They built their own distribution, [SmartOS](http://smartos.org), which is a bootable OS for managing a cloud-like setup, but without the zones.
+
 So if you have a large infrastructure and want to separate some programs from each other, or have some old infrastructure, try OpenIndiana and its zones and KVM.
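The two 9.1 features named above are easy to try out. A minimal sketch, assuming a local 9.1 server and a scratch database named `test`; the table names are invented for the demo:

```sh
# Demo of two PostgreSQL 9.1 features: unlogged tables and writable CTEs.
# "test", "queue" and "archive" are made-up names for this example.
psql test <<'SQL'
-- unlogged tables skip the write-ahead log: faster, but not crash-safe
CREATE UNLOGGED TABLE session_cache (id serial PRIMARY KEY, payload text);

-- writable CTE: move rows from one table to another in a single statement
CREATE TABLE queue (id serial PRIMARY KEY, task text);
CREATE TABLE archive (id int, task text);
INSERT INTO queue (task) VALUES ('one'), ('two');
WITH moved AS (DELETE FROM queue RETURNING id, task)
INSERT INTO archive SELECT id, task FROM moved;
SQL
```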
diff --git a/content/post/69.md b/content/post/69.md
index e803e2e..f68cf80 100644
--- a/content/post/69.md
+++ b/content/post/69.md
@@ -5,8 +5,8 @@ author = "Gibheer"
 draft = true
 +++
-kleinere Hilfestellungen zu ipadm
-
-http://192.9.164.72/bin/view/Project+brussels/ifconfig_ipadm_feature_mapping
-http://arc.opensolaris.org/caselog/PSARC/2010/080/materials/ipadm.1m.txt
+kleinere Hilfestellungen zu ipadm
+
+http://192.9.164.72/bin/view/Project+brussels/ifconfig_ipadm_feature_mapping
+http://arc.opensolaris.org/caselog/PSARC/2010/080/materials/ipadm.1m.txt
 http://blog.allanglesit.com/2011/03/solaris-11-network-configuration-basics/
diff --git a/content/post/70.md b/content/post/70.md
index e7c6834..70f9744 100644
--- a/content/post/70.md
+++ b/content/post/70.md
@@ -5,13 +5,13 @@ author = "Gibheer"
 draft = false
 +++
-If you tried Solaris 11 or OpenIndiana in a fresh installation, you may have noticed, that pfexec may not work the way you are used to. I asked in #openindiana on `irc.freenode.org` and I was told, that the behavior was changed. OpenSolaris was used to have an `Primary Administrator` profile which got assigned to the first account created on the installation. The problem with that is the same as on Windows - you are doing everything with the administrator or root account. To avoid that, sudo was introduced, which needs the password of your account with the default settings. What both tools are very different at what they do and at what they are good at. So it's up to the administrator to define secure roles where appropriate and use sudo rules for the parts, which have to be more secured.
-
-If you want back the old behavior, these two steps should be enough. But keep in mind, that it is important that you secure your system, to avoid misuse.
-
-* there should be line like the following in `/etc/security/prof_attr`
-`Primary Administrator:::Can perform all administrative tasks:auths=solaris.*,solaris.grant;help=RtPriAdmin.html`
-* if there is, then you can add that profile to your user with
-`usermod -P'Primary Administrator`
-
+If you tried Solaris 11 or OpenIndiana in a fresh installation, you may have noticed that pfexec does not work the way you are used to. I asked in #openindiana on `irc.freenode.org` and was told that the behavior was changed. OpenSolaris used to have a `Primary Administrator` profile, which got assigned to the first account created during installation. The problem with that is the same as on Windows - you are doing everything with the administrator or root account. To avoid that, sudo was introduced, which needs the password of your account with the default settings. But the two tools are very different in what they do and what they are good at. So it's up to the administrator to define secure roles where appropriate and use sudo rules for the parts which have to be secured more strictly.
+
+If you want the old behavior back, these two steps should be enough. But keep in mind that it is important to secure your system to avoid misuse.
+
+* there should be a line like the following in `/etc/security/prof_attr`
+`Primary Administrator:::Can perform all administrative tasks:auths=solaris.*,solaris.grant;help=RtPriAdmin.html`
+* if there is, then you can add that profile to your user with
+`usermod -P 'Primary Administrator' <username>`
+
 It is possible to combine these two mechanics too. You could build a zone to ssh into the box with a key and from there, ssh with sudo and a password into the internal systems.
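The two steps above fit into a small script. A sketch, with `alice` standing in for the real account name:

```sh
#!/bin/sh
# Restore the old pfexec behavior, as described in the post above.
# "alice" is a placeholder account name.
user=alice

# step 1: make sure the Primary Administrator profile is defined
grep '^Primary Administrator:' /etc/security/prof_attr || exit 1

# step 2: assign the profile to the account (note the closing quote)
usermod -P 'Primary Administrator' "$user"

# verify which profiles the account now carries
profiles "$user"
```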
diff --git a/content/post/71.md b/content/post/71.md
index 9f55752..3124aa5 100644
--- a/content/post/71.md
+++ b/content/post/71.md
@@ -5,16 +5,16 @@ author = "Gibheer"
 draft = false
 +++
-If you are in the need to set an environment variable for an smf service, you are looking for envvar. It get's set in the `service` scope or in the `exec_method` scope. Here is a small example, how it's used.
-
-```
-<exec_method type='method' name='start' exec='/usr/bin/foo start' timeout_seconds='60'>
-  <method_context>
-    <method_environment>
-      <envvar name='FOO' value='bar' />
-    </method_environment>
-  </method_context>
-</exec_method>
-```
-
+If you need to set an environment variable for an smf service, you are looking for envvar. It gets set in the `service` scope or in the `exec_method` scope. Here is a small example of how it's used.
+
+```
+<exec_method type='method' name='start' exec='/usr/bin/foo start' timeout_seconds='60'>
+  <method_context>
+    <method_environment>
+      <envvar name='FOO' value='bar' />
+    </method_environment>
+  </method_context>
+</exec_method>
+```
+
 This example sets the environment variable `FOO` to bar. This is especially useful when you have to modify `PATH` or `LD_LIBRARY_PATH`. Just don't forget that you did it.
diff --git a/content/post/72.md b/content/post/72.md
index 28c3642..27b7686 100644
--- a/content/post/72.md
+++ b/content/post/72.md
@@ -5,28 +5,28 @@ author = "Gibheer"
 draft = false
 +++
-This is mostly for myself that I can remember how to use the least documented feature of Solaris and openindiana - the `sysidcfg` files.
-
-These files help deploying new zones faster, as you don't have to configure them by hand afterwards. But what is the syntax and how can you use them?
-
-Here is an example file
-
-    name_service=NONE
-    # name_service=DNS {domain_name= name_server=}
-    nfs4_domain=dynamic
-    timezone=Europe/Stockholm
-    terminal=xterms
-    root_password=
-    security_policy=NONE
-    network_interface= {primary hostname= default_route= ip_address= netmask= protocol_ipv6=yes}
-    network_interface= {hostname= ip_address= netmask= protocol_ipv6=yes default_route=NONE}`
-
-The most important thing first: you don't need system_locale after openindiana 151 anymore. If you have it in your config, even with C, delete it or else the setup will not work!
-
-If you don't have a dns record for your zone yet, set the @name_service@ to NONE. If you have already a record set, use the commented syntax.
-
-The next interesting setting is root_password. Here you don't input the password in cleartext but crypted. I wrote a little script to generate this string. You can find the code [here](https://github.com/Gibheer/zero-pwcrypter).
-
-The network_interface part is pretty easy, if you take these lines as a dummy. If you have only one interface, you can name the first interface PRIMARY. That way, you have a bit less to write.
-
+This is mostly for myself, so that I can remember how to use the least documented feature of Solaris and openindiana - the `sysidcfg` files.
+
+These files help deploying new zones faster, as you don't have to configure them by hand afterwards. But what is the syntax and how can you use them?
+
+Here is an example file
+
+    name_service=NONE
+    # name_service=DNS {domain_name= name_server=}
+    nfs4_domain=dynamic
+    timezone=Europe/Stockholm
+    terminal=xterms
+    root_password=
+    security_policy=NONE
+    network_interface= {primary hostname= default_route= ip_address= netmask= protocol_ipv6=yes}
+    network_interface= {hostname= ip_address= netmask= protocol_ipv6=yes default_route=NONE}
+
+The most important thing first: you don't need system_locale after openindiana 151 anymore. If you have it in your config, even with C, delete it or else the setup will not work!
+
+If you don't have a dns record for your zone yet, set the `name_service` to NONE. If you already have a record set, use the commented syntax.
+
+The next interesting setting is root_password.
Here you don't input the password in cleartext but crypted. I wrote a little script to generate this string. You can find the code [here](https://github.com/Gibheer/zero-pwcrypter). + +The network_interface part is pretty easy, if you take these lines as a dummy. If you have only one interface, you can name the first interface PRIMARY. That way, you have a bit less to write. + That's all so far. I will update this post, when I have figured out, what to fill into nfs4_domain and security_policy. diff --git a/content/post/73.md b/content/post/73.md index b328c5f..d0ea642 100644 --- a/content/post/73.md +++ b/content/post/73.md @@ -5,62 +5,62 @@ author = "Gibheer" draft = false +++ -This time, we are going to get routing working on the global zone for our other zones. You can replace the global zone with another zone too, as the setup is the same. - -What's needed? -============== - -First, we need to install ipfilter, if it isn't already installed. To do that, just invoke - - # pkg install ipfilter - -This will install the package filter and NAT engine. Latter is the part, we want to use now. - -We will asume, that the global zone has to interfaces with the following setup - -* bge0 -> 192.168.4.1/24 -* bge1 -> 192.168.5.1/24 - -configure ipnat -=============== - -With `ipnat` installed, we need to write a small configuration. For this example, we set up routing for every machine in the subnet. - -For that, open the file `/etc/ipf/ipnat.conf` and write the following lines: - - map bge0 192.168.5.0/24 -> 0/32 portmap tcp/udp auto - map bge0 192.168.5.0/24 -> 0/32 - -These two lines say, that all packages from the subnet to the rest shall be relabeled and forwarded. - -After that, all we need to do is enable the ipfilter and the routing deamons with the following commands. - - # svcadm enable ipfilter - # routeadm -e ipv4-forwarding - # routeadm -e ipv4-routing - # routeadm -u - -The last command checks if all deamons are running according to the settings. To see, which settings are set and what the deamons are doing, run the `routeadm` command without any arguments. - -configure the zone -================== - -Now we fire up the zone to test, if we can get anywhere near routing. In our case, the zone only has one interface, so that it detects the router itself per icmp. - -We can prove that very easy with - - # netstat -rn - -The default gateway should point to our global zone. To make a last test, you can ping an ip in another subnet. If the global zone says, this host is alive, the zone should do too. - -A good IP to test is 8.8.8.8, as it is really easy to remember. - -That was all. Have fun with your access - -links and hints -=============== - -You can get some more documentation to ipfilter and routing in the man pages of ipnat, ipf and routeadm. Some example rule sets for ipf can be found in `/usr/share/ipfilter/examples/nat.eg`. - -* [a rough setup of routing](http://blog.kevinvandervlist.nl/2011/06/openindiana-zone-with-nat/) +This time, we are going to get routing working on the global zone for our other zones. You can replace the global zone with another zone too, as the setup is the same. + +What's needed? +============== + +First, we need to install ipfilter, if it isn't already installed. To do that, just invoke + + # pkg install ipfilter + +This will install the package filter and NAT engine. Latter is the part, we want to use now. 
+ +We will asume, that the global zone has to interfaces with the following setup + +* bge0 -> 192.168.4.1/24 +* bge1 -> 192.168.5.1/24 + +configure ipnat +=============== + +With `ipnat` installed, we need to write a small configuration. For this example, we set up routing for every machine in the subnet. + +For that, open the file `/etc/ipf/ipnat.conf` and write the following lines: + + map bge0 192.168.5.0/24 -> 0/32 portmap tcp/udp auto + map bge0 192.168.5.0/24 -> 0/32 + +These two lines say, that all packages from the subnet to the rest shall be relabeled and forwarded. + +After that, all we need to do is enable the ipfilter and the routing deamons with the following commands. + + # svcadm enable ipfilter + # routeadm -e ipv4-forwarding + # routeadm -e ipv4-routing + # routeadm -u + +The last command checks if all deamons are running according to the settings. To see, which settings are set and what the deamons are doing, run the `routeadm` command without any arguments. + +configure the zone +================== + +Now we fire up the zone to test, if we can get anywhere near routing. In our case, the zone only has one interface, so that it detects the router itself per icmp. + +We can prove that very easy with + + # netstat -rn + +The default gateway should point to our global zone. To make a last test, you can ping an ip in another subnet. If the global zone says, this host is alive, the zone should do too. + +A good IP to test is 8.8.8.8, as it is really easy to remember. + +That was all. Have fun with your access + +links and hints +=============== + +You can get some more documentation to ipfilter and routing in the man pages of ipnat, ipf and routeadm. Some example rule sets for ipf can be found in `/usr/share/ipfilter/examples/nat.eg`. + +* [a rough setup of routing](http://blog.kevinvandervlist.nl/2011/06/openindiana-zone-with-nat/) * [NAT on solaris](http://www.rite-group.com/rich/solaris_nat.html) diff --git a/content/post/74.md b/content/post/74.md index 1c89b53..96f5bdd 100644 --- a/content/post/74.md +++ b/content/post/74.md @@ -5,93 +5,93 @@ author = "Gibheer" draft = false +++ -In this short post, we will get a container running on a openindiana host. We will do some things in crossbow, but of the following stuff is just configuring the zone. At the end of this blog post, you will find some links to related pages. - -some preparations -================= - -Make sure, that you have a free vnic created with dladm to use in the zone or else, we will have no network available. Further, we need a place on the filesystem, where our zone can be created. We need 500MB to 1.5GB of free space. - -writing a zone configuration -============================ - -In the first step, we have to write a zone configuration. You can use zonecfg directly, but it's better to write it into a textfile and let zonecfg read that file. That way, you can check the configuration into a vcs of your choice. - -The config should look like this. - - create -b - set zonepath=/zones/zone1 - set ip-type=exclusive - set autoboot=false - add net - set physical=zone1 - end - commit - -With this configuration, we build a zone, which get's saved in `/zones`. `/zones` has to be a zfs partition or else the zone can not be created. - -The sixth line sets the network device for the zone to the vnic `zone1`. - -Now we feed the file to zonecfg and let it create *zone1*. 
- - # zonecfg -z zone1 -f zone1.conf - - -installation of the zone -======================== - -The next step is to install the zone with the command: - - # zoneadm -z zone1 install - -or clone it from a template with - - # zoneadm -z zone1 clone template_name - -Now we have to wait a bit and can write the next configuration file. - -writing a sysidcfg -================== - -I wrote a rough post about the [sysidcfg](http://zero-knowledge.org/post/72) already, so take a look there, if you are interested in further details. - -For this example, we use the following content. - - name_service=NONE - nfs4_domain=dynamic - terminal=xterms - # the password is foobar - root_password=0WMBUdFzAu6qU - security_policy=NONE - network_interface=zone1 { - primary - hostname=zone1 - default_route=NONE - ip_address=192.168.5.3 - netmask=255.255.255.0 - protocol_ipv6=no - } - - -booting the zone -================ - -When the installation process has ended, copy the file to `/zones/zone1/root/etc/sysidcfg`. This way, the zone can read the file on the first boot and set most of the stuff. - - # zoneadm -z zone1 boot - -To check if everything gets configured, log into the zone and check the output. - - # zlogin -e ! -C zone1 - -It will take some time until the zone is ready to use, but it should not ask for further details. When the prompt shows, the configuration completed. - -Now you can login into the zone and make further adjustments. Some topics will get their own blog entries here, so take a look at the other entries for help too. - -links -===== - -Here are some links for further details to this topic: - -* [crossbow example from c0t0d0s0](http://www.c0t0d0s0.org/archives/5355-Upcoming-Solaris-Features-Crossbow-Part-1-Virtualisation.html) +In this short post, we will get a container running on a openindiana host. We will do some things in crossbow, but of the following stuff is just configuring the zone. At the end of this blog post, you will find some links to related pages. + +some preparations +================= + +Make sure, that you have a free vnic created with dladm to use in the zone or else, we will have no network available. Further, we need a place on the filesystem, where our zone can be created. We need 500MB to 1.5GB of free space. + +writing a zone configuration +============================ + +In the first step, we have to write a zone configuration. You can use zonecfg directly, but it's better to write it into a textfile and let zonecfg read that file. That way, you can check the configuration into a vcs of your choice. + +The config should look like this. + + create -b + set zonepath=/zones/zone1 + set ip-type=exclusive + set autoboot=false + add net + set physical=zone1 + end + commit + +With this configuration, we build a zone, which get's saved in `/zones`. `/zones` has to be a zfs partition or else the zone can not be created. + +The sixth line sets the network device for the zone to the vnic `zone1`. + +Now we feed the file to zonecfg and let it create *zone1*. + + # zonecfg -z zone1 -f zone1.conf + + +installation of the zone +======================== + +The next step is to install the zone with the command: + + # zoneadm -z zone1 install + +or clone it from a template with + + # zoneadm -z zone1 clone template_name + +Now we have to wait a bit and can write the next configuration file. + +writing a sysidcfg +================== + +I wrote a rough post about the [sysidcfg](http://zero-knowledge.org/post/72) already, so take a look there, if you are interested in further details. 
+ +For this example, we use the following content. + + name_service=NONE + nfs4_domain=dynamic + terminal=xterms + # the password is foobar + root_password=0WMBUdFzAu6qU + security_policy=NONE + network_interface=zone1 { + primary + hostname=zone1 + default_route=NONE + ip_address=192.168.5.3 + netmask=255.255.255.0 + protocol_ipv6=no + } + + +booting the zone +================ + +When the installation process has ended, copy the file to `/zones/zone1/root/etc/sysidcfg`. This way, the zone can read the file on the first boot and set most of the stuff. + + # zoneadm -z zone1 boot + +To check if everything gets configured, log into the zone and check the output. + + # zlogin -e ! -C zone1 + +It will take some time until the zone is ready to use, but it should not ask for further details. When the prompt shows, the configuration completed. + +Now you can login into the zone and make further adjustments. Some topics will get their own blog entries here, so take a look at the other entries for help too. + +links +===== + +Here are some links for further details to this topic: + +* [crossbow example from c0t0d0s0](http://www.c0t0d0s0.org/archives/5355-Upcoming-Solaris-Features-Crossbow-Part-1-Virtualisation.html) * [howto sysidcfg](http://zero-knowledge.org/post/72) diff --git a/content/post/75.md b/content/post/75.md index 7006ab2..5d4df7c 100644 --- a/content/post/75.md +++ b/content/post/75.md @@ -5,8 +5,8 @@ author = "Gibheer" draft = true +++ -Here comes a small hint for everybody else, who wants to run a ntp server in a zone: It does not work! - -The reason for that is, that ntp needs access to the time facility of the kernel. But only global zones are allowed to access this part of the kernel. But don't worry, you don't need a ntp client on the zones, as they get their time information from the global zone. - +Here comes a small hint for everybody else, who wants to run a ntp server in a zone: It does not work! + +The reason for that is, that ntp needs access to the time facility of the kernel. But only global zones are allowed to access this part of the kernel. But don't worry, you don't need a ntp client on the zones, as they get their time information from the global zone. + That cost me about 4 hours to find out. I hope, this could save you some time. diff --git a/content/post/77.md b/content/post/77.md index 1d59848..5db4932 100644 --- a/content/post/77.md +++ b/content/post/77.md @@ -5,133 +5,133 @@ author = "Gibheer" draft = false +++ -This time, we will build a base kerberos setup. At the end, you will be able to login into another machine using kerberos only. - -You need the following things, to make kerberos work: - -* a working dns server -* 2 servers - -I will explain this setup on an openindiana system with 2 zones. `kerberosp1` will be my kerberos machine and `sshp1` will be my ssh server with kerberos support. - -setup of kerberos -================= - -The setup of kerberos was pretty easy, after reading 3 tutorials about it. The essential part here is to decide, how the realm and the admin account should be called. - -To start the setup, call `kdcmgr`. At first, it asks your realm, which you should name like your domain. -After that, you have to generate an admin principal.A principal is like an account for a user or admin. But it's also used for services. I named mine `kerberosp1/admin`. Give it a safe password and you are done. - -Now you should have an populated `/etc/krb5/` directory. Open the file `kdc.conf` in that directory and search for `max_life`. 
It was set to 8 hours for me, which was too long. Adjust the value to 4h or 16h, like you want. I did the same with `max_renewable_life`. - -Edit: You should add the following option in the realms section to your realm. - - kpasswd_protocol = SET_CHANGE - -Kerberos uses a separate protocol for changing the password of principals. A RPC like protocol is used in the solaris version and microsoft has another one too. So the only option compatible on all is `SET_CHANGE`. But to make things worse, the solaris default does not even work in an internal network. So just add this entry and save some stress from trying to find out, why this is not working. - -setting up some accounts -======================== - -To use the kerberos service, check first, if the kdc is running and start it, if it's not. For openindiana, the check is - -`svcs krb5kdc` - -which should return online. - -After that, as root start the kerberos shell with `kadmin.local`. This is a management shell to create, delete and modify principals. -Here we are going to create some policies. With these, we can set some minimal standards, like the minimum password length. - -I created three policies. An `admin`, `user` and a `service` policy. These got the following settings: - -* admin - * minlength 8 - * minclasses 3 -* user - * minlength 8 - * minclasses 2 -* service - * minlength 12 - * minclasses 4 - -This sets some password limitations for every principal group I have. `minclasses` is used for different types of characters. There are lower case, upper case, numbers, punctation and other characters. -The create a new policy use the command `addpol` or `add_policy` with `-minlength` and `-minclasses`. You can simply type the command to get some help or read the man page. - -After creating the policies, we have to create some principals. First, we should create one for ourselves. You can do this with the command `addprinc` or `add_principal`. Give it a policy with the argument `-policy` and a name. You will have to input a password for this principal according to the policies. - -You can use this scheme to create user accounts too. For that, you can generate a password for them with the program `pwgen`. It's pretty helpful and can generate pretty complex passwords, so that should be best. - -Now we need a principal for our ssh server. The name of this principal should be `host/name_of_service.your.domain.name`, so in my case, it is `host/sshp1.prod.lan`. But I did not want to generate any password and added the argument `-randkey` which generates a password according to the policies we set. - -Now we have to export the key of the last principal into a keytab file, that can be read by the service, which wants to use it. This is done with the command `ktadd` like this - -`ktadd -k /etc/krb5.keytab host/sshp1.prod.lan` - -This generates our file in /etc/krb5.keytab. Copy this file into the kerberos directory (on openindiana it's `/etc/krb5/`) and delete the one on the kerberos host. This is important, as another execution of ktadd will append the next key to that file. - -setting up ssh -============== - -For making ssh work with kerberos, we need `/etc/krb5/krb5.conf` and `/etc/krb5/krb5.keytab`. In the step before, we already moved the `krb5.keytab`. We can copy the `krb5.conf` from the kerberos server to the ssh server. - -Now you can start the ssh deamon. - -try to log in -============= - -For the test, we will try to connect to the ssh host from the kerberos host. So start a shell on the kerberos server and type `kinit`. 
This should ask for your password. If it was correct, `klist` should show you, that you have been granted a ticket. - -Now try to open a ssh session to the server, with `-v` set for more informations and it should work. - -problems that can occur -======================= - -no default realm ----------------- - -The is the message - - kinit(v5): Configuration file does not specify default realm when parsing name gibheer - -which hints, that your `/etc/krb5/krb5.conf` is missing. - -client/principal not found --------------------------- - -The message - - kinit(v5): Client 'foo@PROD.LAN' not found in Kerberos database while getting initial credentials - -is a hint, that you forgot to add the principal or that your username could not be found. Just add the principal with `kadmin` and it should work. - -ssh does not use kerberos -------------------------- - -If ssh does not want to use kerberos at all, check for the GSSAPI options. These should be enabled by default, but can be disabled. If that's the case, add the following line to your `sshd_config`. - - GSSAPIAuthentication yes - -After a restart, ssh should use kerberos for authentication. - -links -===== - -* [setup of kerberos on opensolaris](http://www.linuxtopia.org/online_books/opensolaris_2008/SYSADV6/html/setup-148.html) -* [MIT kerberos page](http://web.mit.edu/kerberos/krb5-1.5/krb5-1.5.4/doc/krb5-admin/krb5_002econf.html) -* [KDC Setup on Solaris](http://wiki.creatica.org/cgi-bin/wiki.pl/Kerberos_KDC_server_on_Solaris) -* [Kerberos password](http://fnal.gov/docs/strongauth/princ_pw.html#46115) -* [Kerberos policies](http://pig.made-it.com/kerberos-policy.html) -* [Administrative Guide to Kerberos](http://techpubs.spinlocksolutions.com/dklar/kerberos.html#err_server_not_found) - -one last word -============= - -I have one last word for you: Kerberos does not do authorization! - -That means, that kerberos can not say, if one principal is allowed to use a service or not. It just manages the authentication for you. -If you want to manage the access, there are some possibilities for that. One is to use ldap, often used in conjunction with kerberos. Or you manage the `passwd` files or any other file yourself or you use a service like [chef](http://wiki.opscode.com/display/chef/Home) or [puppet](http://puppetlabs.com/). - -changelog -========= - +This time, we will build a base kerberos setup. At the end, you will be able to login into another machine using kerberos only. + +You need the following things, to make kerberos work: + +* a working dns server +* 2 servers + +I will explain this setup on an openindiana system with 2 zones. `kerberosp1` will be my kerberos machine and `sshp1` will be my ssh server with kerberos support. + +setup of kerberos +================= + +The setup of kerberos was pretty easy, after reading 3 tutorials about it. The essential part here is to decide, how the realm and the admin account should be called. + +To start the setup, call `kdcmgr`. At first, it asks your realm, which you should name like your domain. +After that, you have to generate an admin principal.A principal is like an account for a user or admin. But it's also used for services. I named mine `kerberosp1/admin`. Give it a safe password and you are done. + +Now you should have an populated `/etc/krb5/` directory. Open the file `kdc.conf` in that directory and search for `max_life`. It was set to 8 hours for me, which was too long. Adjust the value to 4h or 16h, like you want. I did the same with `max_renewable_life`. 
+ +Edit: You should add the following option in the realms section to your realm. + + kpasswd_protocol = SET_CHANGE + +Kerberos uses a separate protocol for changing the password of principals. A RPC like protocol is used in the solaris version and microsoft has another one too. So the only option compatible on all is `SET_CHANGE`. But to make things worse, the solaris default does not even work in an internal network. So just add this entry and save some stress from trying to find out, why this is not working. + +setting up some accounts +======================== + +To use the kerberos service, check first, if the kdc is running and start it, if it's not. For openindiana, the check is + +`svcs krb5kdc` + +which should return online. + +After that, as root start the kerberos shell with `kadmin.local`. This is a management shell to create, delete and modify principals. +Here we are going to create some policies. With these, we can set some minimal standards, like the minimum password length. + +I created three policies. An `admin`, `user` and a `service` policy. These got the following settings: + +* admin + * minlength 8 + * minclasses 3 +* user + * minlength 8 + * minclasses 2 +* service + * minlength 12 + * minclasses 4 + +This sets some password limitations for every principal group I have. `minclasses` is used for different types of characters. There are lower case, upper case, numbers, punctation and other characters. +The create a new policy use the command `addpol` or `add_policy` with `-minlength` and `-minclasses`. You can simply type the command to get some help or read the man page. + +After creating the policies, we have to create some principals. First, we should create one for ourselves. You can do this with the command `addprinc` or `add_principal`. Give it a policy with the argument `-policy` and a name. You will have to input a password for this principal according to the policies. + +You can use this scheme to create user accounts too. For that, you can generate a password for them with the program `pwgen`. It's pretty helpful and can generate pretty complex passwords, so that should be best. + +Now we need a principal for our ssh server. The name of this principal should be `host/name_of_service.your.domain.name`, so in my case, it is `host/sshp1.prod.lan`. But I did not want to generate any password and added the argument `-randkey` which generates a password according to the policies we set. + +Now we have to export the key of the last principal into a keytab file, that can be read by the service, which wants to use it. This is done with the command `ktadd` like this + +`ktadd -k /etc/krb5.keytab host/sshp1.prod.lan` + +This generates our file in /etc/krb5.keytab. Copy this file into the kerberos directory (on openindiana it's `/etc/krb5/`) and delete the one on the kerberos host. This is important, as another execution of ktadd will append the next key to that file. + +setting up ssh +============== + +For making ssh work with kerberos, we need `/etc/krb5/krb5.conf` and `/etc/krb5/krb5.keytab`. In the step before, we already moved the `krb5.keytab`. We can copy the `krb5.conf` from the kerberos server to the ssh server. + +Now you can start the ssh deamon. + +try to log in +============= + +For the test, we will try to connect to the ssh host from the kerberos host. So start a shell on the kerberos server and type `kinit`. This should ask for your password. If it was correct, `klist` should show you, that you have been granted a ticket. 
+ +Now try to open a ssh session to the server, with `-v` set for more informations and it should work. + +problems that can occur +======================= + +no default realm +---------------- + +The is the message + + kinit(v5): Configuration file does not specify default realm when parsing name gibheer + +which hints, that your `/etc/krb5/krb5.conf` is missing. + +client/principal not found +-------------------------- + +The message + + kinit(v5): Client 'foo@PROD.LAN' not found in Kerberos database while getting initial credentials + +is a hint, that you forgot to add the principal or that your username could not be found. Just add the principal with `kadmin` and it should work. + +ssh does not use kerberos +------------------------- + +If ssh does not want to use kerberos at all, check for the GSSAPI options. These should be enabled by default, but can be disabled. If that's the case, add the following line to your `sshd_config`. + + GSSAPIAuthentication yes + +After a restart, ssh should use kerberos for authentication. + +links +===== + +* [setup of kerberos on opensolaris](http://www.linuxtopia.org/online_books/opensolaris_2008/SYSADV6/html/setup-148.html) +* [MIT kerberos page](http://web.mit.edu/kerberos/krb5-1.5/krb5-1.5.4/doc/krb5-admin/krb5_002econf.html) +* [KDC Setup on Solaris](http://wiki.creatica.org/cgi-bin/wiki.pl/Kerberos_KDC_server_on_Solaris) +* [Kerberos password](http://fnal.gov/docs/strongauth/princ_pw.html#46115) +* [Kerberos policies](http://pig.made-it.com/kerberos-policy.html) +* [Administrative Guide to Kerberos](http://techpubs.spinlocksolutions.com/dklar/kerberos.html#err_server_not_found) + +one last word +============= + +I have one last word for you: Kerberos does not do authorization! + +That means, that kerberos can not say, if one principal is allowed to use a service or not. It just manages the authentication for you. +If you want to manage the access, there are some possibilities for that. One is to use ldap, often used in conjunction with kerberos. Or you manage the `passwd` files or any other file yourself or you use a service like [chef](http://wiki.opscode.com/display/chef/Home) or [puppet](http://puppetlabs.com/). + +changelog +========= + * added some explanation to `kpasswd_protocol` diff --git a/content/post/78.md b/content/post/78.md index fcd6ba5..83164c8 100644 --- a/content/post/78.md +++ b/content/post/78.md @@ -5,13 +5,13 @@ author = "Gibheer" draft = false +++ -There is a bug in openindiana that does not let you get the content of a page with curl, when it's secured with ssl. The cause of this is an option set on compile time. This option is the the path to the certificate storage. -In the case of openindiana this is set to `/etc/curl/curlCA`, but all certificates reside in `/etc/certs/CA/`. This leads to the following error message, when you try it: - - curl: (77) error setting certificate verify locations - -To fix this, run the following script. - - mkdir /etc/curl && cat /etc/certs/CA/*.pem > /etc/curl/curlCA - +There is a bug in openindiana that does not let you get the content of a page with curl, when it's secured with ssl. The cause of this is an option set on compile time. This option is the the path to the certificate storage. +In the case of openindiana this is set to `/etc/curl/curlCA`, but all certificates reside in `/etc/certs/CA/`. This leads to the following error message, when you try it: + + curl: (77) error setting certificate verify locations + +To fix this, run the following script. 
+ + mkdir /etc/curl && cat /etc/certs/CA/*.pem > /etc/curl/curlCA + This writes all certificates of the default CA in the file curl is looking for and after that, it works. diff --git a/content/post/79.md b/content/post/79.md index f05a7c0..f7feafd 100644 --- a/content/post/79.md +++ b/content/post/79.md @@ -5,95 +5,95 @@ author = "Gibheer" draft = true +++ -Hey there! This time, we will get rubinius running on openindiana. As there is not package for llvm yet, it get's compiled within the build. - -I got it this far because of crsd. He told me how to get llvm running, so that we could get rubinius to compile. -After that [dbussink](https://twitter.com/#!/dbussink) got rbx to compile within two days! He found some really strange things, but in the end, rubinius can run on a solaris platform! - -requirements -============ - -But first, you have to fulfill some requirements. First you have to add the sfe publisher to get the gcc4. -You can do that with the command - - pkg set-publisher -O http://pkg.openindiana.org/sfe sfe - -After that install the following packages - -* developer/gcc-3 -* system/header -* system/library/math/header-math -* gnu-tar -* gnu-make -* gnu-binutils -* gnu-coreutils -* gnu-findutils -* gnu-diffutils -* gnu-grep -* gnu-patch -* gnu-sed -* gawk -* gnu-m4 -* bison -* git - -Yeah, that's alot of gnu, but we need it to get everything going. The cause of this are the old versions of solaris software, which do not support many features. The default compiler is even gcc 3.4.3! - -After you have installed these packages, install the following package from sfe. - -* runtime/gcc - -The order is important, as gcc3 and gcc4 set symlinks in /usr/bin. If you install them in another order, the symlink is not correct and you end up having a lot of work. - -some patching -============= - -After that, we have to fix a small bug in gcc with editing the file `/usr/include/spawn.h`. - - 73,76d72 - < #ifdef __cplusplus - < char *const *_RESTRICT_KYWD argv, - < char *const *_RESTRICT_KYWD envp); - < #else - 79d74 - < #endif - 86,89d80 - < #ifdef __cplusplus - < char *const *_RESTRICT_KYWD argv, - < char *const *_RESTRICT_KYWD envp); - < #else - 92d82 - < #endif - -This fixes a bug in gcc with [the __restrict key word](http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49347). - -fix the path -============ - -Now that we installed and fix a bunch of things, we need to include the gnu path into our own. Use the following command to get this done - - export PATH="/usr/gnu/bin:$PATH" - -Yes, it needs to be at the first place or else one of the old solaris binaries get's chosen and then, nothing works and produces weired errors. - -getting rbx to compile -====================== - -with an own build ------------------ - -If you want to build rbx yourself, get the code from [https://github.com/rubinius/rubinius.git](https://github.com/rubinius/rubinius.git). After that, configure and rake and everything should be fine. - -with rvm ---------- - -If you want to get it working with rvm, install rvm like normal. After that you can simply install rbx with - - rvm install rbx - -That's all you need. - -conclusion -========== - +Hey there! This time, we will get rubinius running on openindiana. As there is not package for llvm yet, it get's compiled within the build. + +I got it this far because of crsd. He told me how to get llvm running, so that we could get rubinius to compile. +After that [dbussink](https://twitter.com/#!/dbussink) got rbx to compile within two days! 
He found some really strange things, but in the end, rubinius can run on a solaris platform!
+
+requirements
+============
+
+But first, you have to fulfill some requirements. You have to add the sfe publisher to get gcc4.
+You can do that with the command
+
+    pkg set-publisher -O http://pkg.openindiana.org/sfe sfe
+
+After that, install the following packages:
+
+* developer/gcc-3
+* system/header
+* system/library/math/header-math
+* gnu-tar
+* gnu-make
+* gnu-binutils
+* gnu-coreutils
+* gnu-findutils
+* gnu-diffutils
+* gnu-grep
+* gnu-patch
+* gnu-sed
+* gawk
+* gnu-m4
+* bison
+* git
+
+Yeah, that's a lot of gnu, but we need it to get everything going. The cause of this is the old solaris software, which does not support many features. The default compiler is even gcc 3.4.3!
+
+After you have installed these packages, install the following package from sfe.
+
+* runtime/gcc
+
+The order is important, as gcc3 and gcc4 set symlinks in /usr/bin. If you install them in the other order, the symlinks are not correct and you end up with a lot of work.
+
+some patching
+=============
+
+After that, we have to fix a small bug in gcc by editing the file `/usr/include/spawn.h`.
+
+    73,76d72
+    < #ifdef __cplusplus
+    < char *const *_RESTRICT_KYWD argv,
+    < char *const *_RESTRICT_KYWD envp);
+    < #else
+    79d74
+    < #endif
+    86,89d80
+    < #ifdef __cplusplus
+    < char *const *_RESTRICT_KYWD argv,
+    < char *const *_RESTRICT_KYWD envp);
+    < #else
+    92d82
+    < #endif
+
+This fixes a bug in gcc with [the \_\_restrict keyword](http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49347).
+
+fix the path
+============
+
+Now that we have installed and fixed a bunch of things, we need to put the gnu path in front of our own. Use the following command to get this done:
+
+    export PATH="/usr/gnu/bin:$PATH"
+
+Yes, it needs to be in first place, or else one of the old solaris binaries gets chosen and then nothing works and you get weird errors.
+
+getting rbx to compile
+======================
+
+with your own build
+-----------------
+
+If you want to build rbx yourself, get the code from [https://github.com/rubinius/rubinius.git](https://github.com/rubinius/rubinius.git). After that, run configure and rake and everything should be fine (see the short sketch at the end of this post).
+
+with rvm
+---------
+
+If you want to get it working with rvm, install rvm as usual. After that you can simply install rbx with
+
+    rvm install rbx
+
+That's all you need.
+
+conclusion
+==========
+
 After dbussink fixed all the errors, rbx compiles fine when the toolchain is there. To get to this point was not easy, but we did it.
 So have a lot of fun with hacking on and using rubinius!
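+
+A minimal sketch of the manual build mentioned above, assuming the stock `configure` script and default rake task of the rubinius repository and that the toolchain from the requirements section is set up:
+
+    # fetch the sources and build with the gnu toolchain in PATH
+    git clone https://github.com/rubinius/rubinius.git
+    cd rubinius
+    ./configure
+    rake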
diff --git a/content/post/80.md b/content/post/80.md
index dbc9b3d..a63cbab 100644
--- a/content/post/80.md
+++ b/content/post/80.md
@@ -5,18 +5,18 @@ author = "Gibheer"
 draft = false
 +++
 
-If you encounter the following error with `make install`
-
-    find: cycle detected for /lib/secure/32/
-    find: cycle detected for /lib/crypto/32/
-    find: cycle detected for /lib/32/
-    find: cycle detected for /usr/lib/elfedit/32/
-    find: cycle detected for /usr/lib/secure/32/
-    find: cycle detected for /usr/lib/link_audit/32/
-    find: cycle detected for /usr/lib/lwp/32/
-    find: cycle detected for /usr/lib/locale/en_US.UTF-8/32/
-    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LO_LTYPE/32/
-    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LC_CTYPE/32/
-    find: cycle detected for /usr/lib/32/
-
+If you encounter the following error with `make install`
+
+    find: cycle detected for /lib/secure/32/
+    find: cycle detected for /lib/crypto/32/
+    find: cycle detected for /lib/32/
+    find: cycle detected for /usr/lib/elfedit/32/
+    find: cycle detected for /usr/lib/secure/32/
+    find: cycle detected for /usr/lib/link_audit/32/
+    find: cycle detected for /usr/lib/lwp/32/
+    find: cycle detected for /usr/lib/locale/en_US.UTF-8/32/
+    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LO_LTYPE/32/
+    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LC_CTYPE/32/
+    find: cycle detected for /usr/lib/32/
+
 use `ginstall` in your Makefile instead of `install`. The latter just seems to be broken on solaris.
diff --git a/content/post/81.md b/content/post/81.md
index 8e86406..27bbe61 100644
--- a/content/post/81.md
+++ b/content/post/81.md
@@ -9,7 +9,7 @@ draft = true
 
 so my last system lasted over two years. You may
 (still/not) remember:\
-http://zero-knowledge.org/post/25
+[Lustige Gehversuche mit ...](/post/25.md)
 
 Now the (un)fortunate circumstances of a dying
 monitor cable made me swap my beloved Hermelin for the Grinsekatze
diff --git a/content/post/82.md b/content/post/82.md
index 6b53729..445eac9 100644
--- a/content/post/82.md
+++ b/content/post/82.md
@@ -5,21 +5,21 @@ author = "Gibheer"
 draft = false
 +++
 
-Two weeks ago, I had a problem with installing rubygems on my laptop. Yesterday, another person had the same problem, so I will document what is wrong here.
-
-The problem itself manifests in the way, that it installs gems with the error message
-
-    WARNING: You don't have /home/steven/.gem/rbx/1.8/bin in your PATH,
-    gem executables will not run.
-
-If you then want to use the binary provided with the gem, it will not work and it happens with all ruby versions, be it rubinius, jruby or 1.9. What makes it worse is the fact, that it only occurs on archlinux installations, till now. And it is not a problem of rvm!
-
-So if you are on archlinux, look into `/etc/gemrc`. There will be a line saying
-
-    gemrc: --user-install
-
-To solve the problem, create a file `~/.gemrc` and put the line
-
-    gemrc:
-
+Two weeks ago, I had a problem with installing rubygems on my laptop. Yesterday, another person had the same problem, so I will document what is wrong here.
+
+The problem manifests itself in that gems get installed with the error message
+
+    WARNING: You don't have /home/steven/.gem/rbx/1.8/bin in your PATH,
+    gem executables will not run.
+
+If you then want to use the binary provided with the gem, it will not work, and it happens with all ruby versions, be it rubinius, jruby or 1.9.
What makes it worse is that, so far, it only occurs on archlinux installations. And it is not a problem with rvm!
+
+So if you are on archlinux, look into `/etc/gemrc`. There will be a line saying
+
+    gemrc: --user-install
+
+To solve the problem, create a file `~/.gemrc` and put the line
+
+    gemrc:
+
 in it. By doing that, the file `/etc/gemrc` will be ignored. And if you are changing that file anyway, look into [all the other options](http://docs.rubygems.org/read/chapter/11) you can set.
diff --git a/content/post/83.md b/content/post/83.md
index f4fee88..68bb04c 100644
--- a/content/post/83.md
+++ b/content/post/83.md
@@ -5,87 +5,87 @@ author = "Gibheer"
 draft = false
 +++
 
-After my openindiana server is already running for 4 months straight, I thought, I write a bit about the ecosystem of Illumos and its state.
-
-Illumos ecosystem
-=================
-
-Illumos is the base system of which every distribution uses. It's more or less
-the base system, like FreeBSD. With Solaris 11 being the original OpenSolaris,
-Illumos is a fork of what was open source of OpenSolaris in 2010.
-
-The development on Illumos is pretty active and at the moment, there is no merge with the Solaris code base planned. Oracle distributed code after the Solaris 11 release, but it was mostly code which had to be distributed either way. So there were no updates on kernel or ZFS code.
-
-This has a huge impact on the future development of Illumos as everything has to be developed by contributors like Joyent, Nexenta and others. But it also has implications for most of the core features of Solaris, the most important ZFS. These are already noticeable with Solaris 11 having ZFS version 31 and FreeBSD and Illumos having version 28. This means, that neither FreeBSD nor Illumos can do something with a zpool created on a Solaris 11. This already makes a switch from one system to another difficult.
-
-But nevertheless the contributors to Illumos work to make it better. The largest part at the moment is to get Illumos compiling with GCC 4.6.1. At the first look, it seems like a minor problem, but OpenSolaris was not written to be built with GCC but with the proprietary SunStudio. As far as I could see, this has some major implications and raised huge holes in the code, which has to get fixed.
-With that the base system is also upgraded from older versions of Perl and python, which also will be a longer process.
-
-Another huge part is the process of building packages. Solaris 10 and older used the SVR4 format. That was pretty simple and looked like rpm. OpenSolaris introduced a new format named IPS - Image Packaging System. This is also compatible with the SVR4 format. OpenSolaris had a pretty big infrastructure for building IPS packages, but it was lost when oracle acquired sun and shut it down.
-The problem now is, how to build new packages. Some are using SVR4 to build the IPS packages, which works good and the repository already has a bunch of newer releases of many projects.
-Another attempt was to use pkgsrc. This is a project of NetBSD and already supports Solaris. This attempt died pretty fast. They were not used like FreeBSD ports and also not for compiling the packages.
-The third approach is to build a packing system on top of dpkg/apt. It is a collaboration between Nexenta, OpenIndiana and others. There is also a plan to build a new distribution out of it - named illumian.
-
-One major difference between Solaris 11 and Illumos is that Illumos has KVM. It got ported from Linux by Joyent and works pretty good.
With this step, Illumos not only had Zones for virtualization but also a full virtualization to get Linux running. - -distribution ecosystem -====================== - -There are a bunch of distributions out there, trying to solve different problems. - -[Solaris 11 - the first cloud os][solaris11] ----------- - -Not so much a distribution of Illumos, but of the old OpenSolaris. Solaris 11 is a pretty good allround distribution. It is used from small systems to huge ones, running one application or some hundred on one machine. Some use it for storage and others to virtualize the hell out of it with zones and crossbow. - -[OpenIndiana - open source and enterprise][openindiana] ------------ - -OpenIndiana was one of the first distributions using the Illumos core. It is available as a server distribution and a desktop one. The server one is targeted for the same usage as Solaris 11. As OpenIndiana uses Illumos it also has support for KVM and therefore can be used as a platform to host many fully virtualized instances on top of ZFS and crossbow infrastructure. - -A problem at the moment is the pretty old software it offers. Most of the packages are from OpenSolaris and therefore nearly 2 years old. Most of them don't even get security patches. The reason for that is the packaging topic mentioned above. As long as they don't have a strategy, nothing will change here. The only option is to use the sfe repo at the moment. - -This may change in the future, because of the joint effort with Nexenta of packaging releases. - -OpenIndiana also has a desktop part which is targeted at ubuntu users wanting ZFS and time machine. As I used OpenSolaris already on a laptop I can only say "Yes, it works". But you have to decide yourself, if you can live with pretty old but stable software. And many projects are not even available in package form, so that one would have to compile it yourself. - -[Nexenta - enterprise storage for everyone][nexenta] -------- - -Nexenta is another distribution who switched to Illumos core pretty fast. It is intended to be used for storage systems, but can also be used for other kinds of servers. It also uses the debian package system and a gnu userland. It is available as a community edition and "enterprise" edition. - -The packages are a bit more up to date than the OpenIndiana ones. With the combined effort of both projects, they may keep closer to the actual releases. - -[illumian - illumos + debian package management][illumian] --------- - -Illumian is a new project and collaboration work between Nexenta and OpenIndiana. It will provide packages through the debian package management dpkg/apt. The target audience seems to be the same as OpenIndiana. The plan at the moment is to release all packages in the same version as in OpenIndiana, so that the ultimate choice will just be, if you want to use dpkg or IPS. - -[SmartOS - the complete modern operating system][smartos] -------- - -This is not so much a distribution as a live image. Its purpose is to use all disks in the server to create a zpool and use that to provide storage for virtual machines, be it zones or KVM instances. The KVM instances are also put into zones to attach dtrace to the virtual instances to see, what's going on in that instance. -SmartOS offers also pretty nice wrappers around the VM operating to get new instances up fast. - -The company behind SmartOS is Joyent, more known for building node.js. 
They use SmartOS as the central pillar of their own JoyentCloud, where they host node.js applications, databases and also Linux machines.
-
-[omnios][omnios]
------
-
-OmniOS is a very new distribution and from OmniIT. It offers not much at the moment apart from an ISO image and a small wiki.
-It is intended to be used much like FreeBSD. They provide a very stripped down Illumos core with updated packages as far as possible and nothing more. Every other package one might need has to be built and distributed through a package repository. The reason behind this is, that they only want to provide the basic image, which everybody needs, but not the packages needed only by themselves. And even these packages may be one or two versions behind.
-And let me tell you - the packages they already updated may be considered bleeding edge by many debian stable users.
-
-What next?
-==========
-
-This was the excursion into the world of Illumos based distributions. I myself will switch away from OpenIndiana. It's great, that Illumos lives and breathes more than 4 months ago, but there is much work left to do. SmartOS had a huge impact for me and others and Joyent and Nexenta do great work on improving the Ecosystem.
-But it will be hard to get back to the times where OpenSolaris was. Too much time went by unused. But I'm looking forward what else might come up of Illumos land.
-
-[solaris11]: http://www.oracle.com/us/products/servers-storage/solaris/solaris11/overview/index.html "Solaris 11"
-[illumos]: http://illumos.org/ "the illumos project"
-[openindiana]: http://openindiana.org/ "OpenIndiana"
-[smartos]: http://smartos.org/ "SmartOS - the complete modern operating system"
-[illumian]: http://illumian.org/ "illumian"
-[nexenta]: http://nexentastor.org/ "Nexenta - the storage platform"
+After my openindiana server has now been running for 4 months straight, I thought I'd write a bit about the ecosystem of Illumos and its state.
+
+Illumos ecosystem
+=================
+
+Illumos is the base system which every distribution uses. It plays more or less
+the same role as the base system of FreeBSD. While Solaris 11 continues the original OpenSolaris,
+Illumos is a fork of the parts of OpenSolaris that were open source in 2010.
+
+The development on Illumos is pretty active and at the moment, there is no merge with the Solaris code base planned. Oracle distributed code after the Solaris 11 release, but it was mostly code which had to be distributed anyway. So there were no updates to kernel or ZFS code.
+
+This has a huge impact on the future development of Illumos, as everything has to be developed by contributors like Joyent, Nexenta and others. But it also has implications for most of the core features of Solaris, most importantly ZFS. These are already noticeable with Solaris 11 having ZFS version 31 while FreeBSD and Illumos have version 28. This means that neither FreeBSD nor Illumos can do anything with a zpool created on Solaris 11, which already makes a switch from one system to another difficult.
+
+But nevertheless the contributors to Illumos work to make it better. The largest part at the moment is to get Illumos compiling with GCC 4.6.1. At first look it seems like a minor problem, but OpenSolaris was not written to be built with GCC but with the proprietary SunStudio. As far as I could see, this has some major implications and revealed huge holes in the code, which have to get fixed.
+With that, the base system is also being upgraded from older versions of Perl and python, which will also be a longer process.
+
+Another huge part is the process of building packages. Solaris 10 and older used the SVR4 format. That was pretty simple and looked like rpm. OpenSolaris introduced a new format named IPS - Image Packaging System. This is also compatible with the SVR4 format. OpenSolaris had a pretty big infrastructure for building IPS packages, but it was lost when oracle acquired sun and shut it down.
+The problem now is how to build new packages. Some are using SVR4 to build the IPS packages, which works well, and the repository already has a bunch of newer releases of many projects.
+Another attempt was to use pkgsrc. This is a NetBSD project and already supports Solaris. This attempt died pretty fast. It was not used like FreeBSD ports and also not for compiling the packages.
+The third approach is to build a packaging system on top of dpkg/apt. It is a collaboration between Nexenta, OpenIndiana and others. There is also a plan to build a new distribution out of it - named illumian.
+
+One major difference between Solaris 11 and Illumos is that Illumos has KVM. It got ported from Linux by Joyent and works pretty well. With this step, Illumos not only had Zones for virtualization but also full virtualization to get Linux running.
+
+distribution ecosystem
+======================
+
+There are a bunch of distributions out there, trying to solve different problems.
+
+[Solaris 11 - the first cloud os][solaris11]
+----------
+
+Not so much a distribution of Illumos, but of the old OpenSolaris. Solaris 11 is a pretty good all-round distribution. It is used from small systems to huge ones, running one application or some hundred on one machine. Some use it for storage and others to virtualize the hell out of it with zones and crossbow.
+
+[OpenIndiana - open source and enterprise][openindiana]
+-----------
+
+OpenIndiana was one of the first distributions using the Illumos core. It is available as a server distribution and a desktop one. The server one is targeted at the same usage as Solaris 11. As OpenIndiana uses Illumos, it also has support for KVM and can therefore be used as a platform to host many fully virtualized instances on top of a ZFS and crossbow infrastructure.
+
+A problem at the moment is the pretty old software it offers. Most of the packages are from OpenSolaris and therefore nearly 2 years old. Most of them don't even get security patches. The reason for that is the packaging topic mentioned above. As long as they don't have a strategy, nothing will change here. The only option at the moment is to use the sfe repo.
+
+This may change in the future because of the joint packaging effort with Nexenta.
+
+OpenIndiana also has a desktop part, which is targeted at ubuntu users wanting ZFS and time machine. As I already used OpenSolaris on a laptop, I can only say "Yes, it works". But you have to decide for yourself if you can live with pretty old but stable software. And many projects are not even available in package form, so you would have to compile them yourself.
+
+[Nexenta - enterprise storage for everyone][nexenta]
+-------
+
+Nexenta is another distribution that switched to the Illumos core pretty fast. It is intended to be used for storage systems, but can also be used for other kinds of servers. It also uses the debian package system and a gnu userland. It is available as a community edition and an "enterprise" edition.
+
+The packages are a bit more up to date than the OpenIndiana ones. With the combined effort of both projects, they may keep closer to the actual releases.
+
+[illumian - illumos + debian package management][illumian]
+--------
+
+Illumian is a new project and a collaboration between Nexenta and OpenIndiana. It will provide packages through the debian package management dpkg/apt. The target audience seems to be the same as OpenIndiana's. The plan at the moment is to release all packages in the same versions as in OpenIndiana, so the ultimate choice will just be whether you want to use dpkg or IPS.
+
+[SmartOS - the complete modern operating system][smartos]
+-------
+
+This is not so much a distribution as a live image. Its purpose is to use all disks in the server to create a zpool and use that to provide storage for virtual machines, be it zones or KVM instances. The KVM instances are also put into zones, so dtrace can be attached to the virtual instances to see what's going on inside them.
+SmartOS also offers pretty nice wrappers around VM operations to get new instances up fast.
+
+The company behind SmartOS is Joyent, better known for building node.js. They use SmartOS as the central pillar of their own JoyentCloud, where they host node.js applications, databases and also Linux machines.
+
+[omnios][omnios]
+------
+
+OmniOS is a very new distribution, from OmniTI. It does not offer much at the moment apart from an ISO image and a small wiki.
+It is intended to be used much like FreeBSD. They provide a very stripped down Illumos core with packages updated as far as possible, and nothing more. Every other package one might need has to be built and distributed through a package repository. The reason behind this is that they only want to provide the basic image everybody needs, not the packages needed only by themselves. And even these packages may be one or two versions behind.
+And let me tell you - the packages they already updated may be considered bleeding edge by many debian stable users.
+
+What next?
+==========
+
+This was the excursion into the world of Illumos based distributions. I myself will switch away from OpenIndiana. It's great that Illumos is more alive than it was 4 months ago, but there is much work left to do. SmartOS had a huge impact for me and others, and Joyent and Nexenta do great work on improving the ecosystem.
+But it will be hard to get back to where OpenSolaris once was. Too much time went by unused. Still, I'm looking forward to what else might come out of Illumos land.
+
+[solaris11]: http://www.oracle.com/us/products/servers-storage/solaris/solaris11/overview/index.html "Solaris 11"
+[illumos]: http://illumos.org/ "the illumos project"
+[openindiana]: http://openindiana.org/ "OpenIndiana"
+[smartos]: http://smartos.org/ "SmartOS - the complete modern operating system"
+[illumian]: http://illumian.org/ "illumian"
+[nexenta]: http://nexentastor.org/ "Nexenta - the storage platform"
 [omnios]: http://omnios.omniti.com "OmniOS from OmniTI"
diff --git a/content/post/84.md b/content/post/84.md
index 7067449..ddfa3c8 100644
--- a/content/post/84.md
+++ b/content/post/84.md
@@ -5,18 +5,18 @@ author = "Gibheer"
 draft = false
 +++
 
-Okay, let's say you are a proud owner of a system and use ZFS. Now lets assume that you lost a disk from your storage and want a fast backup of your data without the hassle of packing up everything, checking for permissions and so on. If the target system has ZFS too, then this will be fun for you, because I will show you, how to make a backup of a ZFS partition and all its descendants in some small steps.
-
-First, you have to build a recursive snapshot for the backup.
This can be done with
-
-    zfs snapshot -r tank/testpartition@backup-today
-
-After that the real magic happens. We send this snapshot over ssh and import it on the other side.
-
-    zfs send -R tank/testpartition@backup-today | ssh target.machine "zfs recv -u tank/backup-machine"
-
-Now all partitions from =tank/testpartition= will be put in =tank/backup-machine= and everything will be preserved. Links will be links, permissions will be the same. The flag =-u= is to prevent mounting the partitions on the target machine or else all partitions will be mounted as they were before.
-
-As this sends the complete dataset over the wire, it is not that usable for backups every day. For this use case, use incremental sends (with the option =-i=). On the receiving side, nothing changes.
-
+Okay, let's say you are a proud owner of a system and use ZFS. Now let's assume that you lost a disk from your storage and want a fast backup of your data without the hassle of packing up everything, checking for permissions and so on. If the target system has ZFS too, then this will be fun for you, because I will show you how to make a backup of a ZFS partition and all its descendants in a few small steps.
+
+First, you have to build a recursive snapshot for the backup. This can be done with
+
+    zfs snapshot -r tank/testpartition@backup-today
+
+After that, the real magic happens. We send this snapshot over ssh and import it on the other side.
+
+    zfs send -R tank/testpartition@backup-today | ssh target.machine "zfs recv -u tank/backup-machine"
+
+Now all partitions from =tank/testpartition= will be put into =tank/backup-machine= and everything will be preserved. Links will be links, permissions will be the same. The flag =-u= prevents mounting the partitions on the target machine; otherwise all partitions would be mounted as they were before.
+
+As this sends the complete dataset over the wire, it is not that useful for daily backups. For that use case, use incremental sends (with the option =-i=). On the receiving side, nothing changes.
+
 Thanks at this point to [shl](http://blogs.interdose.com/sebastian/) for showing me ZFS.
diff --git a/content/post/85.md b/content/post/85.md
index 2d65fc8..89b2d30 100644
--- a/content/post/85.md
+++ b/content/post/85.md
@@ -5,30 +5,30 @@ author = "Stormwind"
 draft = false
 +++
 
-Hallo ihr,
-==========
-
-
-da habe ich es doch tatsächlich völlig verschwitzt letztes Jahr auch von dem Wollfest in Nierstein zu berichten. Also habe ich nun kurz entschlossen die Nummerierung etwas angepasst.
-
-Dieses Jahr bin ich schon ganz früh dran mit Wollfesten. (Im Übrigen nicht zu verwechseln mit dem bösen Wollfasten.)
-In Backnang fand nämlich vorgestern und gestern das [2. Backnanger Wollfest](http://www.backnanger-wollfest.de/) statt und an dem Samstag war ich dann auch dabei.
-
-
-Wollbeute Teil 1

-
- -Somit konnte ich meine schon vorhandenen Wollvorräten mit noch mehr Wolle weiter aufstocken, wie man auf den Bildern unschwer erkennen kann. -Aus dem Hause Zitron gibt es jetzt ein tolles 4-fach Sockengarn, was aus zwei normalen weißen und zwei schon vorgefärbt schwarzen Fäden besteht, was bedeutet, dass es jetzt auch wunderschön dunkle Sockenwolle vom Wolldrachen gibt, die nicht zum Teil komplett Schwarz sein muss. War ja klar, dass ich mir da wieder zwei Stränge unter den Nagel reißen musste. - -Auch sehr schön, die beiden Kammzüge aus Seide einmal in grün und einmal in orange aus 100% Tussah Seide. (Ich schmachte dahin, es ist so wundervoll weich.) Da habe ich auch schon eine Idee was es werden soll, jetzt müsste es also nur noch versponnen und verzwirnt werden. Aber dazu fehlt mir noch den Faden, den ich zum verzwirnen benutzen möchte, den habe ich aber schon bestellt. Jetzt muss er nur noch hier ankommen. Ihr dürft also gespannt sein. Und ich bin es auch, obs am Ende so wird, wie ich das möchte. -Ich habe auch noch zwei weitere Kammzüge mitgebracht, allerdings sieht man den zweiten auf dem Foto nicht, da er schon zu 50% meinem Spinnrad zum Opfer gefallen ist. - -
-Wollbeute Teil 2 -
-
-Und auch meinen Kater habe ich eine Freude gemacht, weil er nun einen neuen - wenngleich auch toten - Freund hat. Das Schaffell, was man im Hintergrund der Bilder sieht. Wobei er sich erst noch ein Bisschen daran gewöhnen muss. Ich glaube er hat vorher noch nie ein Fell gesehen und fand es erstmal gruselig bis er endlich einen Fuß bzw. Pfote darauf gesetzt hat.
-
-Bis denne,
+Hello everyone,
+==========
+
+
+I actually completely forgot to report on the wool festival in Nierstein last year. So I decided on short notice to adjust the numbering a bit.
+
+This year I am starting quite early with wool festivals. (Not to be confused, by the way, with the dreaded wool fasting.)
+The day before yesterday and yesterday, the [2. Backnanger Wollfest](http://www.backnanger-wollfest.de/) took place in Backnang, and on Saturday I was there too.
+
+
+[Image: wool haul, part 1]

+
+
+So I was able to top up my already existing wool stash with even more wool, as you can easily see in the pictures.
+From the house of Zitron there is now a great 4-ply sock yarn, which consists of two plain white and two pre-dyed black strands. This means there is now also wonderfully dark sock wool from the Wolldrachen that does not have to be partly solid black. Of course I had to grab two skeins of it again.
+
+Also very nice: the two combed tops of silk, one in green and one in orange, made of 100% tussah silk. (I am swooning, it is so wonderfully soft.) I already have an idea of what it should become; now it only needs to be spun and plied. But I am still missing the thread I want to use for plying. I have already ordered it, it just has to arrive here. So stay curious. And so am I, about whether it will turn out the way I want in the end.
+I also brought home two more combed tops, but you cannot see the second one in the photo, because 50% of it has already fallen victim to my spinning wheel.
+
+
+[Image: wool haul, part 2]
+
+
+I also made my tomcat happy, because he now has a new - albeit dead - friend: the sheepskin you can see in the background of the pictures. He still has to get used to it a bit, though. I think he had never seen a fur before and found it creepy at first, until he finally set a foot, or rather a paw, on it.
+
+See you,
 Stormwind
diff --git a/content/post/86.md b/content/post/86.md
index 47718a0..2c291fd 100644
--- a/content/post/86.md
+++ b/content/post/86.md
@@ -5,10 +5,10 @@ author = "Gibheer"
 draft = false
 +++
 
-If you have synaptics and your application just keeps scrolling even after you stopped then put the following in your xorg.conf and it should stop that:
-
-    Section "InputClass"
-    Option "CoastingSpeed" "0"
-    EndSection
-
+If you have synaptics and your application just keeps scrolling even after you stopped, then put the following into your xorg.conf and it should stop that:
+
+    Section "InputClass"
+    Option "CoastingSpeed" "0"
+    EndSection
+
 That should help.
diff --git a/content/post/87.md b/content/post/87.md
index 077a465..ba4cd15 100644
--- a/content/post/87.md
+++ b/content/post/87.md
@@ -5,8 +5,8 @@ author = "Gibheer"
 draft = false
 +++
 
-Whenever you have the need to "patch" the path in a Makefile, you can do that with the following line.
-
-    PATH := $(PATH):/optional/path
-
+Whenever you need to "patch" the path in a Makefile, you can do that with the following line.
+
+    PATH := $(PATH):/optional/path
+
 Use $(PATH) to get the PATH variable from the environment. `:=` is used to avoid a circular dependency where PATH would get reassigned over and over again. If you leave out the colon, make will inform you about this.
diff --git a/content/post/88.md b/content/post/88.md
index 670001e..43fd777 100644
--- a/content/post/88.md
+++ b/content/post/88.md
@@ -5,12 +5,12 @@ author = "Gibheer"
 draft = false
 +++
 
-If you ever have the need to generate an encrypted password to put it into scripts or anything else, then you can use the following SQL command to generate it:
-
-    select 'md5'||md5('password'||'rolename');
-
-or in shell
-
-    echo -n 'passwordrolename' | md5sum | awk '{ print "md5" $1 }'
-
+If you ever need to generate an encrypted password to put into scripts or anything else, you can use the following SQL command to generate it:
+
+    select 'md5'||md5('password'||'rolename');
+
+or in shell
+
+    echo -n 'passwordrolename' | md5sum | awk '{ print "md5" $1 }'
+
 What is important is that your rolename is appended to your password and this construct is then run through md5. The relevant piece in the source is [pg_md5_encrypt()](http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/libpq/md5.c)
diff --git a/content/post/89.md b/content/post/89.md
index 97ef33f..895653c 100644
--- a/content/post/89.md
+++ b/content/post/89.md
@@ -5,32 +5,32 @@ author = "Gibheer"
 draft = true
 +++
 
-One of the nicer things on Solaris is SMF, the service management facility. It is a replacement for the SysV init system. It's major features were parallel starting of services, built in service dependencies and pretty good reporting functionality. It also could pretty exactly report, which service failed and what other services were failing over that one. So the dependencies worked in starting and stopping direction.
-One of the worst parts of it was the horrible XML file you had to write for a service to work. I never found them very intuitive and too complex to "just" start and stop a service.
- -It was a pretty good experience and I found it confusing on linux, that no init system was the same. On gentoo all init scripts have a status, only some have on debian. One has to take care of logging yourself and dependencies were either managed through a start order (in archlinux in /etc/rc.conf), by alphabetical order and runlevels (in gentoo) or they built their own system into the init scripts as comments (the debian way). - -Then came ubuntu und built their own init system named upstart. It is a bit between the old SysV init system as it uses init like script files but extended them for some further information, like when to start a service. But most of the file still is mostly script code and gets executed by a shell. -One of the largest change for old users is, that this system can't make use of `/etc/init.d/scriptname start` anymore. Instead you use `start service` or `stop service`. I think, this is a pretty good API and much better for new users to learn. -It can react on events to start and stop services and also can try to restart failed services. Services can also communicate with upstart over dbus. - -Systemd on the other hand is a very different to SysV. At first I was a bit skeptical because of all the negativity. But now I run 3 systems with systemd and till now, it feels good. These three systems are all archlinux boxes and there the migrations is very easy. You can take a look at the [wiki page][archlinux-wiki-systemd]. You can run systemd parallel to SysV init and start it with a parameter in grub. If you feel confident enough you can uninstall the old system and use only systemd in the future. - -So what exactly has systemd, what SysV is missing? - -It uses udev (or udev systemd?) pretty intensive, so that it can show you information about your hardware. -Another thing is, that it can start the daemons in parallel. This alone cut my boot time in half. But it is not that important to me. It was just a wow effect. -Next is the management of the service themselves. The command `systemctl` is the API to everything in systemd, so not as nice as upstart. But with just `systemctl` you already get an overview which services are currently loaded, running and which failed (here is a [gist of what it looks like][github-systemctl-output]). You can start and stop services with `systemctl start` or `systemctl stop` but this will make it not permanent. If you come from solaris and SMF you will find it confusing, as an _enable_ or _disable_ will activate and start the service, or disable and stop the service. But I found it pretty helpful when switchting from one thing to another and I just don't want to stop the service. -Think about switching the login manager. You may have installed the new one, enable it to start at boot time and disable the old one. Now you can still do other things and wait with the restart or you switch to the terminal and stop the old one and start the new one. I waited and did some configuration and afterwards, restarted the hole machine. Did not take me any longer than switching to the console. -Another feature is, that systemd has an included logger. For some it may be not a feature as they already use a configured syslog-ng and don't worry, you can still use it. The program is called journalctl and is bundled with systemd. Systemd starts every deamon within the logger context, so that every program does not have to worry about logging anymore but can just spout it onto STDOUT and systemd takes care of the rest. 
Yes, it sounds pretty retarded, but with it comes a pretty neat feature. If you now call `systemctl status service` you get an overview of the status of the service including the last couple log entries! As systemd builds automatic cgroups it can also show our the daemon dependencies. You can see how it looks like for [net-auto-wireless][github-systemctl-status] on my laptop.
-
-And I would say, that this is by far the best systemd could give me. If a service dies, I need to know all these things and this is the first system, which can show me. Not even SMF could do that. It would just tell me that my service died and others died of it as well, but it could not tell me exactly why or show me the logs.
-
-Yes, my machines all boot faster and I profit from it. I shut down my pcs now. But that is nothing in comparison to the context I get when looking at a service. I can only recommend for everyone to try systemd and look for yourself, if it may help you get your work done faster.
-
-At the moment fedora, Suse and Mandriva seem to be only one using systemd by default. There are other distributions having packages for them. From debian I know, that you [can't replace][debian-systemd] the SysV as easy as on archlinux. But it seems to work. There are already many service files out there for various daemons so it seems to get moving pretty fast. It will be interesting how fast it will be adopted by various distributions.
-
-[archlinux-wiki-systemd]: https://wiki.archlinux.org/index.php/Systemd
-[github-systemctl-output]: https://gist.github.com/3180643#file_gistfile1.txt
-[github-systemctl-status]: https://gist.github.com/3180643#file_systemctl_status_net_auto_wireless
+One of the nicer things on Solaris is SMF, the service management facility. It is a replacement for the SysV init system. Its major features were parallel starting of services, built-in service dependencies and pretty good reporting functionality. It could also report pretty exactly which service failed and which other services failed because of it. So the dependencies worked in both the starting and the stopping direction.
+One of the worst parts of it was the horrible XML file you had to write for a service to work. I never found them intuitive, and they were too complex to "just" start and stop a service.
+
+It was a pretty good experience, and I found it confusing that on linux, no init system was the same. On gentoo all init scripts have a status, on debian only some do. You have to take care of logging yourself, and dependencies were either managed through a start order (in archlinux in /etc/rc.conf), by alphabetical order and runlevels (in gentoo), or built into the init scripts as comments (the debian way).
+
+Then came ubuntu and built their own init system named upstart. It sits somewhere between the old SysV init and a new system, as it uses init-like script files but extends them with some further information, like when to start a service. But most of the file is still script code and gets executed by a shell.
+One of the largest changes for old users is that this system can't use `/etc/init.d/scriptname start` anymore. Instead you use `start service` or `stop service`. I think this is a pretty good API and much better for new users to learn.
+It can react on events to start and stop services and can also try to restart failed services. Services can also communicate with upstart over dbus.
+
+Systemd on the other hand is very different from SysV. At first I was a bit skeptical because of all the negativity.
But now I run 3 systems with systemd, and so far it feels good. These three systems are all archlinux boxes, and there the migration is very easy. You can take a look at the [wiki page][archlinux-wiki-systemd]. You can run systemd parallel to SysV init and start it with a parameter in grub. If you feel confident enough, you can uninstall the old system and use only systemd in the future.
+
+So what exactly does systemd have that SysV is missing?
+
+It uses udev (or systemd's udev?) pretty intensively, so that it can show you information about your hardware.
+Another thing is that it can start the daemons in parallel. This alone cut my boot time in half. But that is not that important to me; it was just a wow effect.
+Next is the management of the services themselves. The command `systemctl` is the API to everything in systemd, so not as nice as upstart's. But with just `systemctl` you already get an overview of which services are currently loaded, running or failed (here is a [gist of what it looks like][github-systemctl-output]). You can start and stop services with `systemctl start` or `systemctl stop`, but this will not make the change permanent. If you come from solaris and SMF you will find it confusing, as an _enable_ or _disable_ there will activate and start the service, or disable and stop it. But I found it pretty helpful when switching from one thing to another, when I just don't want to stop the service.
+Think about switching the login manager. You may have installed the new one, enabled it to start at boot time and disabled the old one. Now you can still do other things and wait with the restart, or you switch to the terminal, stop the old one and start the new one. I waited, did some configuration and afterwards restarted the whole machine. It did not take me any longer than switching to the console.
+Another feature is that systemd has an included logger. For some it may not be a feature, as they already have a configured syslog-ng - and don't worry, you can still use it. The program is called journalctl and is bundled with systemd. Systemd starts every daemon within the logger context, so that programs do not have to worry about logging anymore but can just write everything to STDOUT, and systemd takes care of the rest. Yes, it sounds pretty crude, but with it comes a pretty neat feature: if you now call `systemctl status service`, you get an overview of the status of the service including the last couple of log entries! As systemd builds automatic cgroups, it can also show you the daemon dependencies. You can see what it looks like for [net-auto-wireless][github-systemctl-status] on my laptop.
+
+And I would say that this is by far the best systemd could give me. If a service dies, I need to know all these things, and this is the first system which can show me. Not even SMF could do that. It would just tell me that my service died and others died with it, but it could not tell me exactly why or show me the logs.
+
+Yes, my machines all boot faster and I profit from it. I shut down my pcs now. But that is nothing in comparison to the context I get when looking at a service. I can only recommend to everyone to try systemd and see for yourself if it may help you get your work done faster.
+
+At the moment fedora, Suse and Mandriva seem to be the only ones using systemd by default. There are other distributions having packages for it. From debian I know that you [can't replace][debian-systemd] SysV as easily as on archlinux. But it seems to work.
There are already many service files out there for various daemons, so it seems to be moving pretty fast. It will be interesting to see how fast it will be adopted by the various distributions.
+
+[archlinux-wiki-systemd]: https://wiki.archlinux.org/index.php/Systemd
+[github-systemctl-output]: https://gist.github.com/3180643#file_gistfile1.txt
+[github-systemctl-status]: https://gist.github.com/3180643#file_systemctl_status_net_auto_wireless
 [debian-systemd]: http://wiki.debian.org/systemd#Issue_.231:_sysvinit_vs._systemd-sysv
diff --git a/content/post/90.md b/content/post/90.md
index e382623..6db369a 100644
--- a/content/post/90.md
+++ b/content/post/90.md
@@ -5,20 +5,20 @@ author = "Gibheer"
 draft = false
 +++
 
-To rotate logs on a Solaris system you have to configure logadm to do it.
-
-This is a small example on how it could look like for lighttpd.
-
-Execute the two following statements to create two log entries
-
-    logadm -w /var/lighttpd/1.4/logs/access.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'
-    logadm -w /var/lighttpd/1.4/logs/error.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'
-
-After that, there should be two new entries in `/etc/logadm.conf` with all parameters you gave to logadm. The parameters mean, that the logs will be rotated once a day and 8 old logfiles will be stored. With every rotation lighttpd will be reloaded to use the new empty log file. For more parameters read the man page of logadm. there are also some nice examples at the bottom.
-
-To try it out and see if it runs, create enough log entries and then call logadm with the logfile. In this case it would be
-
-    logadm /var/lighttpd/1.4/logs/access.log
-    logadm /var/lighttpd/1.4/logs/error.log
-
+To rotate logs on a Solaris system, you have to configure logadm to do it.
+
+This is a small example of how it could look for lighttpd.
+
+Execute the two following statements to create two log entries:
+
+    logadm -w /var/lighttpd/1.4/logs/access.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'
+    logadm -w /var/lighttpd/1.4/logs/error.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'
+
+After that, there should be two new entries in `/etc/logadm.conf` with all the parameters you gave to logadm. The parameters mean that the logs will be rotated once a day and 8 old logfiles will be kept. With every rotation, lighttpd will be reloaded to use the new empty log file. For more parameters, read the man page of logadm. There are also some nice examples at the bottom.
+
+To try it out and see if it works, create enough log entries and then call logadm with the logfile. In this case it would be
+
+    logadm /var/lighttpd/1.4/logs/access.log
+    logadm /var/lighttpd/1.4/logs/error.log
+
 After that, it should have created new log files and reloaded lighttpd.
diff --git a/content/post/91.md b/content/post/91.md
index c97daf5..4d2aefd 100644
--- a/content/post/91.md
+++ b/content/post/91.md
@@ -5,14 +5,14 @@ author = "Gibheer"
 draft = false
 +++
 
-After I switched everywhere to a tiling wm, I wondered how everybody else locks his screen. Sure, you can lock the screen with a keybinding, but what when you leave the pc for talking and then leave it be?
-
-The tool I found is [xautolock][xautolock] and works pretty good. After a configurable time span it starts the lock and after another time it can also start suspend, hibernate or whatever.
I use it with the following settings:
-
-    xautolock -locker slock -time 2 -killer "systemctl suspend" -killtime 10 &
-
-This starts slock, the simple locker, after two minutes and sends the pc into suspend after 10 minutes in activity. As it runs in the background, you can either start it through .xinitrc or with your wm of choice.
-
-To lock the screen by command, bind `xautolock -locknow` to your keys and it calls the deamon which then calls the locker.
-
+After I switched everywhere to a tiling wm, I wondered how everybody else locks their screen. Sure, you can lock the screen with a keybinding, but what about when you leave the pc to talk to someone and then leave it be?
+
+The tool I found is [xautolock][xautolock], and it works pretty well. After a configurable time span it starts the locker, and after another time span it can also trigger suspend, hibernate or whatever. I use it with the following settings:
+
+    xautolock -locker slock -time 2 -killer "systemctl suspend" -killtime 10 &
+
+This starts slock, the simple locker, after two minutes and sends the pc into suspend after 10 minutes of inactivity. As it runs in the background, you can either start it through .xinitrc or with your wm of choice.
+
+To lock the screen on demand, bind `xautolock -locknow` to your keys; it calls the daemon, which then calls the locker.
+
 [xautolock]: http://freecode.com/projects/xautolock
diff --git a/content/post/92.md b/content/post/92.md
index 0dc268d..d3612a2 100644
--- a/content/post/92.md
+++ b/content/post/92.md
@@ -5,90 +5,90 @@ author = "Gibheer"
 draft = false
 +++
 
-Just out of curiosity I tried to build a service for PostgreSQL and the systemd init system. Before that, I only read the service files of postgres and dhcp delivered with Archlinux. What I wanted to build is a service file able to start multiple instances of postgres with separate configuration files.
-
-This was much easier than I thought it would be.
-
-Systemd supports that pretty well and the only thing to do, is add an '@' to the service file name. Everything after '@' is then put into a variable `%I`, which can be used in the service file. So my service file was named 'pg@.service' and I put it into `/etc/systemd/system`. Another possible location is `/usr/lib/systemd/system/`.
-
-The service file looks like an `.ini` file. It has the three sections Unit, Service and Install. The section Install in which target the service is installed. Targets are like run levels in other init systems. The postgres service gets installed into the multi-user target, which is started after the network:
-
-    [Install]
-    WantedBy=multi-user.target
-
-The next part is Unit. This section describes the service with a short description and a description of the dependencies. Postgres just needs the network up, so this section looks like this:
-
-    [Unit]
-    Description=run PostgreSQL instance %I
-    After=network.target
-
-There you can also see the %I, which is replaced with the part after '@' from the name in systemd.
-
-The next section is a bit larger and describes everything needed to manage the service itself, like start, stop and reload.
- - [Service] - User=postgres - Group=postgres - TimeoutSec=120 - Type=forking - - EnvironmentFile=/etc/conf.d/pg.%I - - SyslogIdentifier=postgres-%i - - ExecStartPre=/usr/bin/postgresql-check-db-dir ${pgdata} - ExecStart= /usr/bin/pg_ctl -s -D ${pgdata} start -w -t 120 - ExecReload=/usr/bin/pg_ctl -s -D ${pgdata} reload - ExecStop= /usr/bin/pg_ctl -s -D ${pgdata} stop -m fast - - OOMScoreAdjust=-200 - -Okay, this is a bit longer than the other parts. The first Couple of options handle the user to start with and he startup timeout. The timeout can't be replaced with a variable because all options from the config will be loaded as environment variables on execution. The Type option is very important, because it can't be set to anything else as forking for postgres, because it will fork to the background. So if you start it as a simple service systemd would loose the handler to postgres and stop it immediately. - -The next options are EnvironmentFile and SyslogIdentifier. The first is for a small config file in `/etc/conf.d/pg.instance` where you replace instance with the instance name. As you can see with the %I in place, it will fill up the full name with the instance identifier. So you can use different config files for different instances. The same happens to the SyslogIdentifier. I thought it would be awesome if the log can be showed per instance and this is what you need to make it happen. - -The option OOMScoreAdjust is just an option for the OOMKiller, that it should leave postgres alone as much as possible. - -The option ExecStartPre calls a script which is delivered with postgres on Archlinux and does a check for the data dir. If it does not exist, it will log a line on how to create it. Pretty neat. ExecStart, ExecStop and ExecReload describe the actions to be done, when the service should be started, stopped or reloaded. As you can see, the script uses `${pgdata}` to determine where to look and that variable comes from the EnvironmentFile, which looks for my first instance like this - - pgdata=/tmp/ins1 - -The file is saved as `/etc/conf.d/pg.ins1` and is really nothing more than this. The rest can handle postgres itself. - -Now how do we get the service file into systemd? You do a - - systemctl --system daemon-reload - -and then - - systemctl start pg@ins1.service - -This creates your first service and tries to start it. You will get an error message like the following - - Job for pg@ins1.service failed. See 'systemctl status pg@ins1.service' and 'journalctl' for details. - -If you run the status, you will see that it failed, how it failed and the log message from the check script. After that, you can create the instance and start it anew there it is. 
-
-    # systemctl status pg@ins1.service
-    pg@ins1.service - PostgreSQL database server
-    Loaded: loaded (/etc/systemd/system/pg@ins1.service; disabled)
-    Active: active (running) since Tue, 25 Sep 2012 09:27:54 +0200; 3 days ago
-    Process: 372 ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast (code=exited, status=0/SUCCESS)
-    Process: 624 ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120 (code=exited, status=0/SUCCESS)
-    Process: 619 ExecStartPre=/usr/bin/postgresql-check-db-dir ${PGROOT}/data (code=exited, status=0/SUCCESS)
-    Main PID: 627 (postgres)
-    CGroup: name=systemd:/system/postgresql.service
-        ├ 627 /usr/bin/postgres -D /var/lib/postgres/data
-        ├ 629 postgres: checkpointer process
-        ├ 630 postgres: writer process
-        ├ 631 postgres: wal writer process
-        ├ 632 postgres: autovacuum launcher process
-        └ 633 postgres: stats collector process
-
-Now if you want to see some logging, you can ask `journcalctl` and give it the log string.
-
-    journcalctl SYSLOG_IDENTIFIER=postgres-instance1
-
-That's all there is to multi instance services on syslog. To figure everything out actually took not even much time, as the documentation is pretty good. Just a hint, don't look in the web for documentation but in the man pages. The best starting point to look for documentation is `man systemd` and then take a look at the _SEE ALSO_ section.
-
+Just out of curiosity, I tried to build a service for PostgreSQL and the systemd init system. Before that, I had only read the service files of postgres and dhcp delivered with Archlinux. What I wanted to build is a service file able to start multiple instances of postgres with separate configuration files.
+
+This was much easier than I thought it would be.
+
+Systemd supports that pretty well, and the only thing to do is add an '@' to the service file name. Everything after the '@' is then put into a variable `%I`, which can be used in the service file. So my service file was named 'pg@.service' and I put it into `/etc/systemd/system`. Another possible location is `/usr/lib/systemd/system/`.
+
+The service file looks like an `.ini` file. It has the three sections Unit, Service and Install. The section Install defines in which target the service is installed. Targets are like run levels in other init systems. The postgres service gets installed into the multi-user target, which is started after the network:
+
+    [Install]
+    WantedBy=multi-user.target
+
+The next part is Unit. This section describes the service with a short description and a description of the dependencies. Postgres just needs the network up, so this section looks like this:
+
+    [Unit]
+    Description=run PostgreSQL instance %I
+    After=network.target
+
+There you can also see the %I, which systemd replaces with the part after the '@' in the name.
+
+The next section is a bit larger and describes everything needed to manage the service itself, like start, stop and reload.
+
+    [Service]
+    User=postgres
+    Group=postgres
+    TimeoutSec=120
+    Type=forking
+
+    EnvironmentFile=/etc/conf.d/pg.%I
+
+    SyslogIdentifier=postgres-%i
+
+    ExecStartPre=/usr/bin/postgresql-check-db-dir ${pgdata}
+    ExecStart= /usr/bin/pg_ctl -s -D ${pgdata} start -w -t 120
+    ExecReload=/usr/bin/pg_ctl -s -D ${pgdata} reload
+    ExecStop= /usr/bin/pg_ctl -s -D ${pgdata} stop -m fast
+
+    OOMScoreAdjust=-200
+
+Okay, this is a bit longer than the other parts. The first couple of options set the user to start with and the startup timeout.
The timeout can't be replaced with a variable, because all options from the config will be loaded as environment variables on execution. The Type option is very important, because it can't be set to anything other than forking for postgres, as postgres forks into the background. If you started it as a simple service, systemd would lose the handle to postgres and stop it immediately.
+
+The next options are EnvironmentFile and SyslogIdentifier. The first points to a small config file in `/etc/conf.d/pg.instance`, where you replace instance with the instance name. As you can see, with the %I in place it will fill up the full name with the instance identifier, so you can use different config files for different instances. The same happens to the SyslogIdentifier. I thought it would be awesome if the log could be shown per instance, and this is what you need to make that happen.
+
+The option OOMScoreAdjust is just an option for the OOMKiller, telling it to leave postgres alone as much as possible.
+
+The option ExecStartPre calls a script which is delivered with postgres on Archlinux and does a check on the data dir. If it does not exist, the script will log a line on how to create it. Pretty neat. ExecStart, ExecStop and ExecReload describe the actions to be taken when the service should be started, stopped or reloaded. As you can see, the script uses `${pgdata}` to determine where to look, and that variable comes from the EnvironmentFile, which for my first instance looks like this
+
+    pgdata=/tmp/ins1
+
+The file is saved as `/etc/conf.d/pg.ins1` and is really nothing more than this. The rest postgres can handle itself.
+
+Now how do we get the service file into systemd? You do a
+
+    systemctl --system daemon-reload
+
+and then
+
+    systemctl start pg@ins1.service
+
+This creates your first service and tries to start it. You will get an error message like the following
+
+    Job for pg@ins1.service failed. See 'systemctl status pg@ins1.service' and 'journalctl' for details.
+
+If you run the status command, you will see that it failed, how it failed and the log message from the check script. After that, you can create the instance, start it anew, and there it is.
+
+    # systemctl status pg@ins1.service
+    pg@ins1.service - PostgreSQL database server
+    Loaded: loaded (/etc/systemd/system/pg@ins1.service; disabled)
+    Active: active (running) since Tue, 25 Sep 2012 09:27:54 +0200; 3 days ago
+    Process: 372 ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast (code=exited, status=0/SUCCESS)
+    Process: 624 ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120 (code=exited, status=0/SUCCESS)
+    Process: 619 ExecStartPre=/usr/bin/postgresql-check-db-dir ${PGROOT}/data (code=exited, status=0/SUCCESS)
+    Main PID: 627 (postgres)
+    CGroup: name=systemd:/system/postgresql.service
+        ├ 627 /usr/bin/postgres -D /var/lib/postgres/data
+        ├ 629 postgres: checkpointer process
+        ├ 630 postgres: writer process
+        ├ 631 postgres: wal writer process
+        ├ 632 postgres: autovacuum launcher process
+        └ 633 postgres: stats collector process
+
+Now if you want to see some logging, you can ask `journalctl` and give it the log string.
+
+    journalctl SYSLOG_IDENTIFIER=postgres-instance1
+
+That's all there is to multi instance services with systemd. To figure everything out actually did not take much time, as the documentation is pretty good. Just a hint: don't look on the web for documentation but in the man pages. The best starting point is `man systemd`; from there, take a look at the _SEE ALSO_ section.
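+
+If you also want the instance to come up at boot, the [Install] section from the beginning makes that a one-liner; a small sketch, assuming the same unit name as above:
+
+    # hook pg@ins1.service into the multi-user target for the next boot
+    systemctl enable pg@ins1.service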
+ Have fun! diff --git a/content/post/93.md b/content/post/93.md index 9c48d0e..a21cb69 100644 --- a/content/post/93.md +++ b/content/post/93.md @@ -5,49 +5,49 @@ author = "Gibheer" draft = false +++ -Today we release a small project from me - [zero][zero-github] 0.1.0. - -It is aimed at being a toolkit for building web services. It is build around the idea of abstracting away what is tedious work and work with the information clearer. With that in mind, some modules are already included. These are the following. - -Request -------- - -This class provides an interface to information regarding the request and making them available grouped together in other parts. - -One example is the grouping of parameters in one attribute `request.params` which makes *get* and *post* parameters available separately. As many developers also regard URLs as some kind of parameters it is possible to define custom key/value pairs. - -Another example is the grouping of all accept headers under `#accept`, which makes accessing type information nice to read: `request.accept.types`. - -Response --------- - -The response class helps in building responses in a easy way. It provides similar interface as the `Rack::Response`. It also does a small check already for status code correctness and will probably get more helpers in the future to set common headers and status codes. - -Router ------- - -Zero has a small router which takes routes and pushes the request to other applications. A similar implementation is Rack::URLMap, but this router is also able to extract variables from the URL and puts them into the Request. - -Renderer --------- - -The renderer is a facility to render templates according to the accept type wanted by the client. That is possible by providing a map of short type names to the actual mimetypes which is then used internally to search for the right template. With this, developers have only to provide the template and mapping and then the rendering takes care of actually using it. - -The router does not take care of the actual method of the request. This can be done before the router with a small application defining routers for every possible method or working with the method in the applications called by the routers. - -Controller ----------- - -This component is still a "Work in Progress" but can already be used to glue all these parts together. It splits the work into two steps - processing the request and rendering the resulting data. If that workflow does not fit, it is also possible to extend or modify the controller to adapt other work flows. - -Status and Future of the toolset --------------------------------- - -The idea is to make web service development easier than before and this is only the beginning of zero. The plan is to extend the toolkit with more modules and make all modules possibly available as stand alone modules. That way, zero can be used in other projects to replace parts or help in making developing easier. It is a Work in Progress at the moment but the results so far are very promising. - -We also have a [repository for example applications][zero-examples] and we will extend it with the time to show as many aspects of the toolkit as possible. - -If you are interested, checkout [zero on github][zero-github] or have a look at the [examples][zero-examples]. - -[zero-github]: https://github.com/Gibheer/zero +Today we release a small project from me - [zero][zero-github] 0.1.0. + +It is aimed at being a toolkit for building web services. 
It is built around the idea of abstracting away tedious work and making it easier to work with the information. With that in mind, some modules are already included. These are the following.
+
+Request
+-------
+
+This class provides an interface to information regarding the request, grouping it together and making it available to other parts.
+
+One example is the grouping of parameters in one attribute `request.params`, which makes *get* and *post* parameters available separately. As many developers also regard URLs as some kind of parameters, it is possible to define custom key/value pairs.
+
+Another example is the grouping of all accept headers under `#accept`, which makes accessing type information nice to read: `request.accept.types`.
+
+Response
+--------
+
+The response class helps in building responses in an easy way. It provides a similar interface to `Rack::Response`. It also already does a small check for status code correctness and will probably get more helpers in the future to set common headers and status codes.
+
+Router
+------
+
+Zero has a small router which takes routes and pushes the request to other applications. A similar implementation is Rack::URLMap, but this router is also able to extract variables from the URL and put them into the Request.
+
+Renderer
+--------
+
+The renderer is a facility to render templates according to the accept type wanted by the client. That is possible by providing a map of short type names to the actual mimetypes, which is then used internally to search for the right template. With this, developers only have to provide the template and the mapping, and the renderer takes care of actually using it.
+
+The router does not take care of the actual method of the request. This can be done before the router with a small application defining routes for every possible method, or by working with the method in the applications called by the router.
+
+Controller
+----------
+
+This component is still a "Work in Progress" but can already be used to glue all these parts together. It splits the work into two steps - processing the request and rendering the resulting data. If that workflow does not fit, it is also possible to extend or modify the controller to adapt to other workflows.
+
+Status and Future of the toolset
+--------------------------------
+
+The idea is to make web service development easier than before, and this is only the beginning of zero. The plan is to extend the toolkit with more modules and to make all modules available as stand-alone modules where possible. That way, zero can be used in other projects to replace parts or to help make development easier. It is a Work in Progress at the moment, but the results so far are very promising.
+
+We also have a [repository for example applications][zero-examples] and we will extend it over time to show as many aspects of the toolkit as possible.
+
+If you are interested, check out [zero on github][zero-github] or have a look at the [examples][zero-examples].
+
+[zero-github]: https://github.com/Gibheer/zero
 [zero-examples]: https://github.com/Gibheer/zero-examples
diff --git a/content/post/94.md b/content/post/94.md
index 1b4e2e9..c33d46e 100644
--- a/content/post/94.md
+++ b/content/post/94.md
@@ -5,60 +5,60 @@ author = "Gibheer"
 draft = false
 +++
-Today someone told me about natural and inner joins. As I'm using SQL for many years already, I was a bit puzzled at first. I heard of the terms, but thought till now, that they were meaning the same.
-
-The first thing I did was looking in the [PostgreSQL documentation][pg-doc] and yes, they are not the same. But they are also the same.
-
-The inner join is the default for doing joins. It just joins two tables using the on clause.
-
-    # select *
-    from tableA A
-    join tableB B
-    on A.id = B.id;
-
-    | a.id | b.id |
-    |------|------|
-    |    3 |    3 |
-
-Now an interesting thing is, that the on clause can be replaced by a using clause when both tables provide the same columns. This not only makes the select a bit shorter, but also reduces the number of columns in the result. All columns listed in the using clause will be left out from the result and replaced with a new column with the name used in the using clause. The select from above would then look like this
-
-    # select *
-    from tableA A
-    join tableB B
-    using (id);
-
-    | id |
-    |----|
-    |  3 |
-
-The natural join goes one step further and tries to search for common columns itself and generate a using clause itself. The resulting query then looks like this
-
-    # select *
-    from tableA A
-    natural join tableB B;
-
-    | id |
-    |----|
-    |  3 |
-
-As nice as this seems it can backfire pretty fast, when one has two tables with a column of the same name, but completely different content not meant to be joined. Then it is possible just to get nothing.
-
-    # select * from foo limit 1;
-     id | test | bar
-    ----+------+-----
-      1 |    1 |   3
-
-    # select * from baz limit 1;
-     id | test | bar
-    ----+------+-----
-      1 |    1 |  20
-
-    # select * from foo natural join baz
-     id | test | bar
-    ----+------+-----
-
-As all columns are named the same, but the content is different in column bar, no common row is found and therefore returned.
-
-For further information, the [PostgreSQL documentation][pg-doc] is pretty good.
-
+Today someone told me about natural and inner joins. As I have been using SQL for many years already, I was a bit puzzled at first. I had heard of the terms, but until now thought they meant the same thing.
+
+The first thing I did was look in the [PostgreSQL documentation][pg-doc], and yes, they are not the same. But they are also the same.
+
+The inner join is the default for doing joins. It just joins two tables using the on clause.
+
+    # select *
+    from tableA A
+    join tableB B
+    on A.id = B.id;
+
+    | a.id | b.id |
+    |------|------|
+    |    3 |    3 |
+
+Now an interesting thing is that the on clause can be replaced by a using clause when both tables provide the same columns. This not only makes the select a bit shorter, but also reduces the number of columns in the result. All columns listed in the using clause will be left out of the result and replaced with a new column with the name used in the using clause. The select from above would then look like this
+
+    # select *
+    from tableA A
+    join tableB B
+    using (id);
+
+    | id |
+    |----|
+    |  3 |
+
+The natural join goes one step further and searches for common columns itself, generating the using clause automatically. The resulting query then looks like this
+
+    # select *
+    from tableA A
+    natural join tableB B;
+
+    | id |
+    |----|
+    |  3 |
+
+As nice as this seems, it can backfire pretty fast when one has two tables with a column of the same name but completely different content not meant to be joined. Then it is possible to get nothing at all.
+
+    # select * from foo limit 1;
+     id | test | bar
+    ----+------+-----
+      1 |    1 |   3
+
+    # select * from baz limit 1;
+     id | test | bar
+    ----+------+-----
+      1 |    1 |  20
+
+    # select * from foo natural join baz
+     id | test | bar
+    ----+------+-----
+
+As all columns are named the same but the content differs in column bar, no common row is found and therefore nothing is returned.
+
+For further information, the [PostgreSQL documentation][pg-doc] is pretty good.
+
 [pg-doc]: http://www.postgresql.org/docs/current/static/queries-table-expressions.html
diff --git a/content/post/95.md b/content/post/95.md
index ddb454f..0c95577 100644
--- a/content/post/95.md
+++ b/content/post/95.md
@@ -5,22 +5,22 @@ author = "Gibheer"
 draft = false
 +++
-This is some kind of hint for others, which may have the same problems I had.
-
-I wanted to compile [llvm](http://llvm.org/) 3.1 on omnios, an illumos distribution but it did not work out like I wanted it to. One of the first errors I got was a linking error.
-
-    Text relocation remains          referenced
-        against symbol               offset      in file
-    llvm::LoopBase::getLoopPredecessor() const 0x149a /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
-    llvm::LoopBase::getExitBlocks(llvm::SmallVectorImpl&) const 0x6200 /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
-    ld: fatal: relocations remain against allocatable but non-writable sections
-
-The problem in this case is, that parts of the llvm code are not compiled position independent (PIC). As I learned, this can be solved with the following setting.
-
-    LDFLAGS="-mimpure-text -Wl,-ztextwarn"
-
-This changes the way of linking to only warn about position independent code, but still link it all together. It is not a nice solution, but with this, it is possible to find out, where it is gooing wrong.
-
-After that problem partially solved, I had another problem. Solaris supports 32bit and 64bit programs on the same environment, just like you can do on linux with multilib. The first compile of llvm produced 32bit binaries. When trying to compile llvm for 64bit, it was just impossible. I tried different things, like setting `CFLAGS`, `LDFLAGS`, `OTHER_OPTIONS` whatever there was and the only thing to get it compiled for 64bit is to overwrite `CC` and `CXX`. It seems like the Makefile just ignores the CFLAGS and therefore does only compile the code for the hostsystem seemingly bitness.
-
+This is some kind of hint for others who may have the same problems I had.
+
+I wanted to compile [llvm](http://llvm.org/) 3.1 on omnios, an illumos distribution, but it did not work out like I wanted it to. One of the first errors I got was a linking error.
+
+    Text relocation remains          referenced
+        against symbol               offset      in file
+    llvm::LoopBase::getLoopPredecessor() const 0x149a /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
+    llvm::LoopBase::getExitBlocks(llvm::SmallVectorImpl&) const 0x6200 /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
+    ld: fatal: relocations remain against allocatable but non-writable sections
+
+The problem in this case is that parts of the llvm code are not compiled position independent (PIC). As I learned, this can be solved with the following setting.
+
+    LDFLAGS="-mimpure-text -Wl,-ztextwarn"
+
+This changes the linking to only warn about the remaining text relocations, but still link it all together. It is not a nice solution, but with it, it is possible to find out where it is going wrong.
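+
+For context, this is roughly how the flag can be passed into the build (a sketch only; the configure step and its options are illustrative, not the exact invocation used back then):
+
+    ./configure
+    gmake LDFLAGS="-mimpure-text -Wl,-ztextwarn"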
+
+After that problem was partially solved, I had another problem. Solaris supports 32bit and 64bit programs in the same environment, just like you can do on linux with multilib. The first compile of llvm produced 32bit binaries. When trying to compile llvm for 64bit, it was just impossible. I tried different things, like setting `CFLAGS`, `LDFLAGS`, `OTHER_OPTIONS` and whatever else there was, and the only thing that got it compiled for 64bit was to overwrite `CC` and `CXX`. It seems like the Makefile just ignores the CFLAGS and therefore only compiles the code for the host system's bitness.
+
 But both of these problems got solved with 3.2, which I tried from svn and they work. The release date of 3.2 is only 7 days away, so hopefully it will still work by then. Nice thing is, [Rubinius](https://github.com/rubinius/rubinius) can already use it :D
diff --git a/content/post/96.md b/content/post/96.md
index 826a0bf..0e42e60 100644
--- a/content/post/96.md
+++ b/content/post/96.md
@@ -5,140 +5,140 @@ author = "Gibheer"
 draft = false
 +++
-I got an interesting question regarding zones on Solaris in #omnios.
-
-> scarcry: Does anyone know how to move a zone from one zpool to another?
-
-There are some guides out there on how to move a zone from one machine to
-another, but most of them install the zone in the same place as before.
-
-But instead of moving it from one machine to another, this small guide will just
-show what to do, when only the location is chaning.
-
-preparations
-------------
-
-First, we need to setup the partitions and zones for our little experiment. For
-this example, I will use the pool `rpool` and the following partitions
-
-* `rpool/zones/old` mounted to `/zones/old/`
-* `rpool/zones/new` mounted to `/zones/new/`
-
-We also need the zone config, so here is it.
-
-    create -b
-    set zonepath=/zones/old/zone1
-    set ip-type=exclusive
-    set autoboot=false
-    add net
-    set physical=zone1
-    end
-    commit
-
-Just install the zone with the normal commands
-
-    $ zonecfg -z zone1 < zone.config
-    $ zoneadm -z zone1 install
-    $ zoneadm -z zone1 boot
-
-Check if the zone is running and write a file, just to make sure, we have the
-same zone at the end.
-
-moving the zone
----------------
-
-For this guide, we will assume, that the zone is in production use and can't be
-offline too long. For that to work, we will do a first snapshot, when the zone
-is still running.
-
-    $ zfs snapshot -r rpool/zones/old/zone1@move1
-
-After that, we can replay that snapshot into the new location.
-
-    $ zfs send -R rpool/zones/old/zone1@move1 | zfs recv rpool/zones/new/zone1
-
-This step will take some time, depending on the size of your zone. Now we stop
-the the zone and detach it.
-
-    $ zoneadm -z zone1 halt
-    $ zoneadm -z zone1 detach
-
-This frees the zfs partition from the zone and makes it accessible. We need that
-a bit later.
-Now we need an incremental snapshot and move that data to the new location.
-
-    $ zfs snapshot -r rpool/zones/old/zone1@move2
-    $ zfs send -R -i move1 rpool/zones/old/zone1@move2 | zfs recv rpool/zones/new/zone1
-
-When we now list all zfs partitions, we see, that a partition zbe is mounted two
-times into the same location.
-
-    rpool/zones/old/zone1/ROOT/zbe    724M  1.59T   723M  /zones/old/zone1/root
-    rpool/zones/new/zone1/ROOT/zbe    724M  1.59T   723M  /zones/old/zone1/root
-
-To fix that, issue the following command.
-
-    zfs set mountpoint=/zones/new/zone1/root rpool/zones/new/zone1/ROOT/zbe
-
-Now the partition has to be mounted, so that zoneadm can find it for the attach.
-You can do that with the following command
-
-    zfs mount rpool/zones/new/zone1/ROOT/zbe
-
-Now with the partition in the correct place, we have to tell the zone, where to
-look for its new partition.
-
-    $ zonecfg -z zone1
-    zonecfg:zone1> set zonepath=/zones/new/zone1
-    zonecfg:zone1> verify
-    zonecfg:zone1> commit
-    zonecfg:zone1> exit
-
-With the zone reconfigured, attach the zone.
-
-    $ zoneadm -z zone1 attach
-
-This may take a bit of time, as the content of the zone gets checked for
-compatibility. When it is back, check the zone is installed.
-
-    $ zoneadm list -cv
-      ID NAME             STATUS     PATH                           BRAND    IP
-       - zone1            installed  /zones/new/zone1               ipkg     excl
-
-Now boot the zone and we are done.
-
-    $ zoneadm -z zone1 boot
-
-Now check if everything is where you expect it to be and start your services and
-everything is good.
-
-ideas
------
-
-Here are some ideas, what can be done differently in the process.
-
-### **iterative snapshots**
-
-If you zone has a lot of traffic, where many changes aggregate between the first
-snapshot and the second, do some more iterative snapshots before taking down the
-zone.
-This has the advantage, that you can close the gap of changes to a minimum size
-and therefore make the move at the end a bit faster. But check the available
-disk space in the process to avoid a full disk.
-
-### **create a new zone**
-
-Instead of chaning the old zone and therefore making a rollback more complicated,
-create a new zone, which looks exactly like the old one.
-Instead of chaning the old one, do instead
-
-    $ zonecfg -z zone2
-    zonecfg:zone2> create -a /zones/new/zone1
-
-This will set everything from the old zone with the new zonepath. Keep in mind,
-that this will also use the old interface. If you don't want that, create a new
-interface before and change it in the config step.
-
-You can also restore that zfs partition in a partition which has the correct.
-
+I got an interesting question regarding zones on Solaris in #omnios.
+
+> scarcry: Does anyone know how to move a zone from one zpool to another?
+
+There are some guides out there on how to move a zone from one machine to
+another, but most of them install the zone in the same place as before.
+
+But instead of moving it from one machine to another, this small guide will just
+show what to do when only the location is changing.
+
+preparations
+------------
+
+First, we need to set up the partitions and zones for our little experiment. For
+this example, I will use the pool `rpool` and the following partitions
+
+* `rpool/zones/old` mounted to `/zones/old/`
+* `rpool/zones/new` mounted to `/zones/new/`
+
+We also need the zone config, so here it is.
+
+    create -b
+    set zonepath=/zones/old/zone1
+    set ip-type=exclusive
+    set autoboot=false
+    add net
+    set physical=zone1
+    end
+    commit
+
+Just install the zone with the normal commands
+
+    $ zonecfg -z zone1 < zone.config
+    $ zoneadm -z zone1 install
+    $ zoneadm -z zone1 boot
+
+Check if the zone is running and write a file, just to make sure we have the
+same zone at the end.
+
+moving the zone
+---------------
+
+For this guide, we will assume that the zone is in production use and can't be
+offline too long. For that to work, we will take a first snapshot while the zone
+is still running.
+
+    $ zfs snapshot -r rpool/zones/old/zone1@move1
+
+After that, we can replay that snapshot into the new location.
+
+    $ zfs send -R rpool/zones/old/zone1@move1 | zfs recv rpool/zones/new/zone1
+
+This step will take some time, depending on the size of your zone.
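+
+Before touching the zone, it can be worth checking that the stream really
+arrived on the target pool. A quick sketch (using the dataset names from above;
+the output will differ per system):
+
+    $ zfs list -r rpool/zones/new/zone1
+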
+Now we stop the zone and detach it.
+
+    $ zoneadm -z zone1 halt
+    $ zoneadm -z zone1 detach
+
+This frees the zfs partition from the zone and makes it accessible. We need that
+a bit later.
+Now we take an incremental snapshot and move that data to the new location.
+
+    $ zfs snapshot -r rpool/zones/old/zone1@move2
+    $ zfs send -R -i move1 rpool/zones/old/zone1@move2 | zfs recv rpool/zones/new/zone1
+
+When we now list all zfs partitions, we see that a partition zbe is mounted two
+times into the same location.
+
+    rpool/zones/old/zone1/ROOT/zbe    724M  1.59T   723M  /zones/old/zone1/root
+    rpool/zones/new/zone1/ROOT/zbe    724M  1.59T   723M  /zones/old/zone1/root
+
+To fix that, issue the following command.
+
+    zfs set mountpoint=/zones/new/zone1/root rpool/zones/new/zone1/ROOT/zbe
+
+Now the partition has to be mounted, so that zoneadm can find it for the attach.
+You can do that with the following command
+
+    zfs mount rpool/zones/new/zone1/ROOT/zbe
+
+Now with the partition in the correct place, we have to tell the zone where to
+look for its new partition.
+
+    $ zonecfg -z zone1
+    zonecfg:zone1> set zonepath=/zones/new/zone1
+    zonecfg:zone1> verify
+    zonecfg:zone1> commit
+    zonecfg:zone1> exit
+
+With the zone reconfigured, attach the zone.
+
+    $ zoneadm -z zone1 attach
+
+This may take a bit of time, as the content of the zone gets checked for
+compatibility. When it is back, check that the zone is installed.
+
+    $ zoneadm list -cv
+      ID NAME             STATUS     PATH                           BRAND    IP
+       - zone1            installed  /zones/new/zone1               ipkg     excl
+
+Now boot the zone and we are done.
+
+    $ zoneadm -z zone1 boot
+
+Now check if everything is where you expect it to be, start your services, and
+everything is good.
+
+ideas
+-----
+
+Here are some ideas on what can be done differently in the process.
+
+### **iterative snapshots**
+
+If your zone has a lot of traffic, where many changes aggregate between the first
+snapshot and the second, do some more iterative snapshots before taking down the
+zone.
+This has the advantage that you can close the gap of changes to a minimum size
+and therefore make the move at the end a bit faster. But check the available
+disk space in the process to avoid a full disk.
+
+### **create a new zone**
+
+Instead of changing the old zone and therefore making a rollback more complicated,
+create a new zone which looks exactly like the old one.
+Instead of changing the old one, do
+
+    $ zonecfg -z zone2
+    zonecfg:zone2> create -a /zones/new/zone1
+
+This will take over everything from the old zone, but with the new zonepath.
+Keep in mind that this will also use the old interface. If you don't want that,
+create a new interface before and change it in the config step.
+
+You can also restore the zfs partition into a partition which already has the
+correct mountpoint.
+
 I hope it helps and you have some fun playing with it.
diff --git a/content/post/97.md b/content/post/97.md
index d085e5f..91a70ba 100644
--- a/content/post/97.md
+++ b/content/post/97.md
@@ -5,29 +5,29 @@ author = "Gibheer"
 draft = false
 +++
-I had the need to filter logs from different programs into different places - in this case the postgres and nginx logs. The man page of `syslog.conf` describes it pretty good, but misses some examples to make it more clear. So here is how I configured it, to make it easier.
-
-First, I edited the `syslog.conf`
-
-    # filter everything apart from postgres and nginx
-    !-postgres,nginx
-    *.err;kern.warning;auth.notice;mail.crit    /dev/console
-    # and all the other stuff
-
-    # filter only postgres
-    !postgres
-    *.*    /var/log/postgresql.log
-
-    # filter only nginx
-    !nginx
-    *.*    /var/log/nginx.log
-
-The next step is to setup the log rotate. This happens in `/etc/newsyslog.conf`. The man page is very helpful, so if you want to adjust something, take a peek into it.
-
-    # postgresql
-    /var/log/postgresql.log    640  5  100  *  JC
-
-    # nginx
-    /var/log/nginx.log         640  5  100  *  JC
-
+I had the need to filter logs from different programs into different places - in this case the postgres and nginx logs. The man page of `syslog.conf` describes it pretty well, but misses some examples to make it clearer. So here is how I configured it, to make it easier.
+
+First, I edited the `syslog.conf`
+
+    # filter everything apart from postgres and nginx
+    !-postgres,nginx
+    *.err;kern.warning;auth.notice;mail.crit    /dev/console
+    # and all the other stuff
+
+    # filter only postgres
+    !postgres
+    *.*    /var/log/postgresql.log
+
+    # filter only nginx
+    !nginx
+    *.*    /var/log/nginx.log
+
+The next step is to set up the log rotation. This happens in `/etc/newsyslog.conf`. The man page is very helpful, so if you want to adjust something, take a peek into it.
+
+    # postgresql
+    /var/log/postgresql.log    640  5  100  *  JC
+
+    # nginx
+    /var/log/nginx.log         640  5  100  *  JC
+
 And that is all. If you want to add more program filters, you have to define them in the `syslog.conf` as *notfilter* and *filter* and add the rotate to `newsyslog.conf`.
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..14883b2
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,5 @@
+module git.zero-knowledge.org/gibheer/zblog
+
+go 1.18
+
+require github.com/russross/blackfriday/v2 v2.1.0
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..502a072
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,2 @@
+github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
diff --git a/main.go b/main.go
new file mode 100644
index 0000000..6e75326
--- /dev/null
+++ b/main.go
@@ -0,0 +1,217 @@
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"html/template"
+	"io"
+	"io/fs"
+	"log"
+	"net/http"
+	"os"
+	"path"
+	"path/filepath"
+	"time"
+
+	"github.com/russross/blackfriday/v2"
+)
+
+var (
+	contentDir  = flag.String("content-dir", "content", "path to the content directory")
+	staticDir   = flag.String("static-dir", "static", "path to the static files")
+	templateDir = flag.String("template-dir", "templates", "path to the template directory")
+	outputDir   = flag.String("output-dir", "", "path to output all files from the render process")
+	listen      = flag.String("listen", "", "When provided with a listen port, start serving the content")
+)
+
+type (
+	Metadata struct {
+		URLPath  string
+		FilePath string
+		Template string
+		Title    string
+		Date     time.Time
+		Author   string
+		Draft    bool
+	}
+)
+
+func main() {
+	flag.Parse()
+	var err error
+
+	tmplDirFS := os.DirFS(*templateDir)
+	templates := template.New("")
+	templates = templates.Funcs(template.FuncMap(
+		map[string]interface{}{
+			"formatTime": func(t time.Time) string {
+				return t.Format("2006-01-02")
+			},
+		},
+	))
+	templates, err = templates.ParseFS(tmplDirFS, "*")
+	if err != nil {
+		log.Fatalf("could not parse template files: %s", err)
+	}
+
+	content := []Metadata{}
+	if err := filepath.Walk(*contentDir, func(path string, info fs.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.IsDir() {
+			return nil
+		}
+		m, err := MetadataFromFile(*contentDir, path)
+		if err != nil {
+			return fmt.Errorf("could not parse metadata from '%s': %w", path, err)
+		}
+		content = append(content, m)
+		return nil
+	}); err != nil {
+		log.Fatalf("could not read content: %s", err)
+	}
+
+	if *outputDir != "" {
+		for _, metadata := range content {
+			p := *outputDir + metadata.URLPath
+			if p[len(p)-1] == '/' {
+				p = path.Join(p, "index.html")
+			}
+
+			// create directory
+			if _, err := os.Stat(path.Dir(p)); os.IsNotExist(err) {
+				if err := os.MkdirAll(path.Dir(p), 0755); err != nil {
+					log.Fatalf("could not create directory '%s': %s", path.Dir(p), err)
+				}
+			}
+
+			f, err := os.Create(p)
+			if err != nil {
+				log.Fatalf("could not create new file '%s': %s", p, err)
+			}
+			if err := metadata.Render(f, templates); err != nil {
+				log.Fatalf("could not render '%s': %s", metadata.FilePath, err)
+			}
+			// close explicitly instead of deferring, as this runs in a loop
+			f.Close()
+		}
+	}
+
+	if *listen != "" {
+		http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(*staticDir))))
+		for _, metadata := range content {
+			func(m Metadata) {
+				http.HandleFunc(m.URLPath, func(w http.ResponseWriter, r *http.Request) {
+					log.Printf("%s -> %s", r.URL, m.URLPath)
+					w.Header().Set("Content-Type", "text/html")
+					if err := m.Render(w, templates); err != nil {
+						log.Printf("could not render '%s': %s", m.FilePath, err)
+					}
+				})
+			}(metadata)
+		}
+		log.Fatalf("stopped listening: %s", http.ListenAndServe(*listen, nil))
+	}
+
+	if *outputDir == "" && *listen == "" {
+		log.Printf("neither output-dir nor listen are requested - doing nothing")
+	}
+}
+
+var (
+	metadataStart = []byte("+++\n")
+	metadataEnd   = []byte("\n+++\n")
+
+	headerTitle    = "title"
+	headerDate     = "date"
+	headerAuthor   = "author"
+	headerURLPath  = "url"
+	headerDraft    = "draft"
+	headerTemplate = "template"
+)
+
+// MetadataFromFile reads the header of the file to create the metadata.
+//
+// basePath is stripped from the path when generating the default URL path.
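+// A content file is expected to start with a small key/value header between
+// '+++' markers. The field values below are only an illustration:
+//
+//	+++
+//	title = "example post"
+//	date = "2015-11-04T12:23:00+02:00"
+//	author = "gibheer"
+//	+++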
+func MetadataFromFile(basePath string, path string) (Metadata, error) {
+	m := Metadata{
+		FilePath: path,
+		URLPath:  path[len(basePath):],
+		Template: "content.html",
+	}
+	raw, err := os.ReadFile(m.FilePath)
+	if err != nil {
+		return m, err
+	}
+	if !bytes.HasPrefix(raw, metadataStart) {
+		return m, fmt.Errorf("missing metadata header, must start with +++")
+	}
+	last := bytes.Index(raw, metadataEnd)
+	if last == -1 {
+		return m, fmt.Errorf("missing metadata header, must end with +++ on a single line")
+	}
+	rawHeader := raw[len(metadataStart):last]
+	lineNum := 0
+	for _, headerLine := range bytes.Split(rawHeader, []byte("\n")) {
+		if len(headerLine) == 0 {
+			continue
+		}
+		line := bytes.SplitN(headerLine, []byte("="), 2)
+		if len(line) != 2 {
+			return m, fmt.Errorf("line %d: format must be 'key = value'", lineNum)
+		}
+		key := string(bytes.Trim(line[0], " "))
+		val := string(bytes.Trim(line[1], ` "'`))
+		switch key {
+		case headerTitle:
+			m.Title = val
+		case headerAuthor:
+			m.Author = val
+		case headerDraft:
+			// compare the value, not the header name, to detect drafts
+			if val == "true" {
+				m.Draft = true
+			}
+		case headerTemplate:
+			m.Template = val
+		case headerDate:
+			m.Date, err = time.Parse(time.RFC3339, val)
+			if err != nil {
+				log.Printf("line %d: date must match RFC3339 format", lineNum)
+			}
+		case headerURLPath:
+			m.URLPath = val
+		default:
+			log.Printf("line %d: unknown header %s found in %s", lineNum, key, path)
+		}
+		lineNum += 1
+	}
+
+	return m, nil
+}
+
+func (m Metadata) Content() template.HTML {
+	result := ""
+	raw, err := os.ReadFile(m.FilePath)
+	if err != nil {
+		log.Printf("error reading file: %s", err)
+		return template.HTML("")
+	}
+
+	end := bytes.Index(raw, metadataEnd)
+	if end == -1 {
+		log.Printf("could not find metadata end")
+		return template.HTML("")
+	}
+
+	result = string(blackfriday.Run(raw[end+len(metadataEnd):]))
+	return template.HTML(result)
+}
+
+func (m Metadata) Render(w io.Writer, tmpl *template.Template) error {
+	if err := tmpl.ExecuteTemplate(w, m.Template, m); err != nil {
+		return fmt.Errorf("could not render content path '%s': %w", m.FilePath, err)
+	}
+	return nil
+}
diff --git a/static/go-mono/Go-Mono-Bold-Italic.ttf b/static/go-mono/Go-Mono-Bold-Italic.ttf
new file mode 100644
index 0000000..0884406
Binary files /dev/null and b/static/go-mono/Go-Mono-Bold-Italic.ttf differ
diff --git a/static/go-mono/Go-Mono-Bold.ttf b/static/go-mono/Go-Mono-Bold.ttf
new file mode 100644
index 0000000..bbad5fd
Binary files /dev/null and b/static/go-mono/Go-Mono-Bold.ttf differ
diff --git a/static/go-mono/Go-Mono-Italic.ttf b/static/go-mono/Go-Mono-Italic.ttf
new file mode 100644
index 0000000..ec27133
Binary files /dev/null and b/static/go-mono/Go-Mono-Italic.ttf differ
diff --git a/static/go-mono/Go-Mono.ttf b/static/go-mono/Go-Mono.ttf
new file mode 100644
index 0000000..e64e22f
Binary files /dev/null and b/static/go-mono/Go-Mono.ttf differ
diff --git a/static/go-mono/README b/static/go-mono/README
new file mode 100644
index 0000000..7043c36
--- /dev/null
+++ b/static/go-mono/README
@@ -0,0 +1,36 @@
+These fonts were created by the Bigelow & Holmes foundry specifically for the
+Go project. See https://blog.golang.org/go-fonts for details.
+
+They are licensed under the same open source license as the rest of the Go
+project's software:
+
+Copyright (c) 2016 Bigelow & Holmes Inc.. All rights reserved.
+
+Distribution of this font is governed by the following license. If you do not
+agree to this license, including the disclaimer, do not distribute or modify
+this font.
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Google Inc. nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +DISCLAIMER: THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/themes/zero/static/img/zero-knowledge.png b/static/zero-knowledge.png similarity index 100% rename from themes/zero/static/img/zero-knowledge.png rename to static/zero-knowledge.png diff --git a/templates/content.html b/templates/content.html new file mode 100644 index 0000000..daecbfa --- /dev/null +++ b/templates/content.html @@ -0,0 +1,14 @@ +{{ template "header.html" . }} +
+

{{ .Title }}

+ + {{ .Content }} +
+ +{{ template "footer.html" . }} diff --git a/templates/footer.html b/templates/footer.html new file mode 100644 index 0000000..62b274f --- /dev/null +++ b/templates/footer.html @@ -0,0 +1 @@ + diff --git a/templates/header.html b/templates/header.html new file mode 100644 index 0000000..8fec42c --- /dev/null +++ b/templates/header.html @@ -0,0 +1,37 @@ + + + + zero-knowledge - {{ .Title }} + + + +
+ +
diff --git a/templates/index.html b/templates/index.html new file mode 100644 index 0000000..131837b --- /dev/null +++ b/templates/index.html @@ -0,0 +1,8 @@ +{{ template "header.html" . }} +
+ {{ .Content }} +
+ +{{ template "footer.html" . }} diff --git a/themes/zero/layouts/_default/list.html b/themes/zero/layouts/_default/list.html deleted file mode 100644 index 8733fbe..0000000 --- a/themes/zero/layouts/_default/list.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ partial "header.html" . }} - {{ partial "li" .Paginator }} -{{ partial "footer.html" . }} diff --git a/themes/zero/layouts/_default/si.html b/themes/zero/layouts/_default/si.html deleted file mode 100644 index bf06144..0000000 --- a/themes/zero/layouts/_default/si.html +++ /dev/null @@ -1,28 +0,0 @@ -
- -
-

{{ .Title }}

-
- -
- {{ .Content }} -
- -
diff --git a/themes/zero/layouts/_default/single.html b/themes/zero/layouts/_default/single.html deleted file mode 100644 index c41b37b..0000000 --- a/themes/zero/layouts/_default/single.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ partial "header.html" . }} - {{ .Render "si" }} -{{ partial "footer.html" . }} diff --git a/themes/zero/layouts/_default/summary.html b/themes/zero/layouts/_default/summary.html deleted file mode 100644 index 35840f2..0000000 --- a/themes/zero/layouts/_default/summary.html +++ /dev/null @@ -1,11 +0,0 @@ -
-
-

{{ .Title }}

-
- -
- {{ .Content }} -
-
diff --git a/themes/zero/layouts/index.html b/themes/zero/layouts/index.html deleted file mode 100644 index 7c958ec..0000000 --- a/themes/zero/layouts/index.html +++ /dev/null @@ -1,3 +0,0 @@ -{{ partial "header.html" . }} -{{ partial "li.html" (.Paginate (where .Data.Pages "Section" "post")) }} -{{ partial "footer.html" . }} diff --git a/themes/zero/layouts/partials/footer.html b/themes/zero/layouts/partials/footer.html deleted file mode 100644 index b605728..0000000 --- a/themes/zero/layouts/partials/footer.html +++ /dev/null @@ -1,2 +0,0 @@ - - diff --git a/themes/zero/layouts/partials/header.html b/themes/zero/layouts/partials/header.html deleted file mode 100644 index 2681ccb..0000000 --- a/themes/zero/layouts/partials/header.html +++ /dev/null @@ -1,15 +0,0 @@ - - - - {{ if eq .URL "/" }}{{ .Site.Title }}{{ else }}{{ .Title }} - {{ .Site.Title }}{{ end }} - - - {{ if .RSSlink }} - - {{ end }} - - - -
-

{{ .Site.Title }}

-
diff --git a/themes/zero/layouts/partials/li.html b/themes/zero/layouts/partials/li.html deleted file mode 100644 index a561c55..0000000 --- a/themes/zero/layouts/partials/li.html +++ /dev/null @@ -1,21 +0,0 @@ - -
    - {{ range .Pages }} -
  • - {{ .Render "summary" }} -
  • - {{ end }} -
- diff --git a/themes/zero/layouts/post/summary.html b/themes/zero/layouts/post/summary.html deleted file mode 100644 index e7404cf..0000000 --- a/themes/zero/layouts/post/summary.html +++ /dev/null @@ -1,15 +0,0 @@ -
-
-

{{ .Title }}

-
- - -
- {{ .Summary }} -
- diff --git a/themes/zero/static/css/style.css b/themes/zero/static/css/style.css deleted file mode 100644 index e0a7fe9..0000000 --- a/themes/zero/static/css/style.css +++ /dev/null @@ -1,154 +0,0 @@ -* { - box-sizing: border-box; - margin: 0; - padding: 0; -} - -body > header > h1 { - margin: 1em; -} -body > header > h1 > a { - color: transparent; - display: block; - width: 465px; - height: 117px; - background: url('/img/zero-knowledge.png'); -} - -.entries { - list-style-type: none; - display: flex; - flex-flow: row wrap; - justify-content: space-around; - align-items: stretch; -} -.entries > li { - margin-top: 0.5em; - margin-bottom: 0.5em; - padding: 1em; -} - -body { - background-image: url('/img/background.png'); -} - -@media screen and ((max-width: 750px) or (orientation: portait)) { - .entries > li { - width: 100%; - } -} -@media screen and (min-width: 750px) and (orientation: landscape) { - .entries > li { - width: 49%; - } -} -@media screen and (min-width: 1000px) and (orientation: landscape) { - .entries > li { - width: 32%; - } -} -@media screen and (min-width: 1450px) and (orientation: landscape) { - .entries > li { - width: 24%; - } -} - -nav.pagination > a { - display: block; - width: 100%; - text-align: center; - padding: 0.5em; - - font-weight: bold; - color: #ff9900; - - background-color: #3a5f78; -} -nav.pagination > a.deactivated { - color: transparent; -} - -article { - display: flex; - flex-direction: column; -} -article > header { - order: 1; -} -article > nav { - order: 4; -} -article > aside { - order: 2; -} -article > section { - order: 3; -} - -article > header > h1, .entry > header > h1 { - border-bottom: 0.1em solid #ff9900; -} -article > header > h1 > *, .entry > header > h1 > * { - text-decoration: none; - font-size: 1.25rem; - color: #ff9900; -} - -article > aside, .entry > aside { - margin-bottom: 0.5em; -} -article > aside > *, .entry > aside > * { - color: #3a5f78; - font-weight: bold; - font-size: 0.8rem; -} -article > aside > .author::before, .entry > aside > .author::before { - content: 'by '; -} -article > aside > .date::before, .entry > aside > .date::before { - content: 'on '; -} - -article > section { -} - -article > nav > a { - font-size: 0.8rem; - color: #3a5f78; - font-weight: bold; -} -article > nav > .more::after { - content: ' >'; -} - -body > .entry > header { - margin-top: 2em; -} -.entry > header > h1, .entry > aside { - padding-left: 1em; -} -.entry > section { - margin: auto; - max-width: 50rem; - margin-bottom: 1em; -} -.entry > section h1, .entry > section h2 { - margin-top: 1em; - margin-bottom: 0.5em; - font-size: 1.25rem; - color: #3a5f78; - border-bottom: 0.1em solid #3a5f78; -} -.entry > section pre { - margin: 0.5em; - padding: 0.5em; - background-color: rgba(58, 95, 120, 0.3); - overflow-x: auto; -} -.entry > section ul, .entry > section dl, .entry > section ol { - margin: 0.5em; - margin-left: 1.5em; -} -section > p { - margin-bottom: 1.00rem; -} diff --git a/themes/zero/static/img/background.png b/themes/zero/static/img/background.png deleted file mode 100644 index a08606e..0000000 Binary files a/themes/zero/static/img/background.png and /dev/null differ diff --git a/themes/zero/static/img/grunge_readme.txt b/themes/zero/static/img/grunge_readme.txt deleted file mode 100644 index 0fc2a1f..0000000 --- a/themes/zero/static/img/grunge_readme.txt +++ /dev/null @@ -1,8 +0,0 @@ - - -======================================================== - This pattern is downloaded from www.subtlepatterns.com - If you need more, that's where to 
get'em. - ======================================================== - - \ No newline at end of file diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 0000000..75623dc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 0000000..b0b525a --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 0000000..2885af3 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 0000000..d9c08a2 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. 
+ + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + +Usage +----- + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday/v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. 
This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. In order to + protect yourself against JavaScript injection in untrusted content, see + [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content). + +* **Fast processing**. It is fast enough to render on-demand in + most web applications without having to cache the output. + +* **Thread safety**. You can run multiple parsers in different + goroutines without ill effect. There is no dependence on global + shared state. + +* **Minimal dependencies**. Blackfriday only depends on standard + library packages in Go. The source code is pretty + self-contained, so it is easy to add to any project, including + Google App Engine projects. + +* **Standards compliant**. Output successfully validates using the + W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional. + + +Extensions +---------- + +In addition to the standard markdown syntax, this package +implements the following extensions: + +* **Intra-word emphasis supression**. The `_` character is + commonly used inside words when discussing code, so having + markdown interpret it as an emphasis command is usually the + wrong thing. Blackfriday lets you treat all emphasis markers as + normal characters when they occur inside a word. + +* **Tables**. Tables can be created by drawing them in the input + using a simple syntax: + + ``` + Name | Age + --------|------ + Bob | 27 + Alice | 23 + ``` + +* **Fenced code blocks**. In addition to the normal 4-space + indentation to mark code blocks, you can explicitly mark them + and supply a language (to make syntax highlighting simple). 
Just + mark it like this: + + ```go + func getTrue() bool { + return true + } + ``` + + You can use 3 or more backticks to mark the beginning of the + block, and the same number to mark the end of the block. + + To preserve classes of fenced code blocks while using the bluemonday + HTML sanitizer, use the following policy: + + ```go + p := bluemonday.UGCPolicy() + p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code") + html := p.SanitizeBytes(unsafe) + ``` + +* **Definition lists**. A simple definition list is made of a single-line + term followed by a colon and the definition for that term. + + Cat + : Fluffy animal everyone likes + + Internet + : Vector of transmission for pictures of cats + + Terms must be separated from the previous definition by a blank line. + +* **Footnotes**. A marker in the text that will become a superscript number; + a footnote definition that will be placed in a list of footnotes at the + end of the document. A footnote looks like this: + + This is a footnote.[^1] + + [^1]: the footnote text. + +* **Autolinking**. Blackfriday can find URLs that have not been + explicitly marked as links and turn them into links. + +* **Strikethrough**. Use two tildes (`~~`) to mark text that + should be crossed out. + +* **Hard line breaks**. With this extension enabled newlines in the input + translate into line breaks in the output. This extension is off by default. + +* **Smart quotes**. Smartypants-style punctuation substitution is + supported, turning normal double- and single-quote marks into + curly quotes, etc. + +* **LaTeX-style dash parsing** is an additional option, where `--` + is translated into `–`, and `---` is translated into + `—`. This differs from most smartypants processors, which + turn a single hyphen into an ndash and a double hyphen into an + mdash. + +* **Smart fractions**, where anything that looks like a fraction + is translated into suitable HTML (instead of just a few special + cases like most smartypant processors). For example, `4/5` + becomes `45`, which renders as + 45. + + +Other renderers +--------------- + +Blackfriday is structured to allow alternative rendering engines. Here +are a few of note: + +* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown): + provides a GitHub Flavored Markdown renderer with fenced code block + highlighting, clickable heading anchor links. + + It's not customizable, and its goal is to produce HTML output + equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode), + except the rendering is performed locally. + +* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt, + but for markdown. + +* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex): + renders output as LaTeX. + +* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience + integration with the [Chroma](https://github.com/alecthomas/chroma) code + highlighting library. bfchroma is only compatible with v2 of Blackfriday and + provides a drop-in renderer ready to use with Blackfriday, as well as + options and means for further customization. + +* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer. 
+ +* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style + + +TODO +---- + +* More unit testing +* Improve Unicode support. It does not understand all Unicode + rules (about what constitutes a letter, a punctuation symbol, + etc.), so it may fail to detect word boundaries correctly in + some instances. It is safe on all UTF-8 input. + + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + + [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2 + [BuildV2URL]: https://travis-ci.org/russross/blackfriday + [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2 + [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2 diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go new file mode 100644 index 0000000..dcd61e6 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/block.go @@ -0,0 +1,1612 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + "unicode" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
+ // <div> + // ... + // </div>
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + 
} + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + +func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 0000000..57ff152 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. 
+// +// It translates plain text with simple formatting rules into an AST, which can +// then be further processed to HTML (provided by Blackfriday itself) or other +// formats (provided by the community). +// +// The simplest way to invoke Blackfriday is to call the Run function. It will +// take a text input and produce a text output in HTML (or other format). +// +// A slightly more sophisticated way to use Blackfriday is to create a Markdown +// processor and to call Parse, which returns a syntax tree for the input +// document. You can leverage Blackfriday's parsing for content extraction from +// markdown documents. You can assign a custom renderer and set various options +// to the Markdown processor. +// +// If you're interested in calling Blackfriday from the command line, see +// https://github.com/russross/blackfriday-tool. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. This algorithm is used to create +// anchors for headings when the AutoHeadingIDs extension is enabled. The +// algorithm is specified below, so that other packages can create +// compatible anchor names and links to those anchors. +// +// The algorithm iterates over the input text, interpreted as UTF-8, +// one Unicode code point (rune) at a time. All runes that are letters (category L) +// or numbers (category N) are considered valid characters. They are mapped to +// lower case, and included in the output. All other runes are considered +// invalid characters. Invalid characters that precede the first valid character, +// as well as invalid characters that follow the last valid character, +// are dropped completely. All other sequences of invalid characters +// between two valid characters are replaced with a single dash character '-'. +// +// SanitizedAnchorName exposes this functionality, and can be used to +// create compatible links to the anchor names generated by blackfriday. +// This algorithm is also implemented in a small standalone package at +// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients +// that want a small package and don't need the full functionality of blackfriday. +package blackfriday + +// NOTE: Keep the Sanitized Anchor Name algorithm in sync with package +// github.com/shurcooL/sanitized_anchor_name. +// Otherwise, users of sanitized_anchor_name will get anchor names +// that are incompatible with those generated by blackfriday.
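The vendored doc.go above describes the two entry points this blog engine relies on: Run for one-shot Markdown-to-HTML conversion, and SanitizedAnchorName for the heading-anchor algorithm. As a quick orientation, a minimal usage sketch follows; it is not part of the vendored code, just an illustration assuming the module is imported as github.com/russross/blackfriday/v2:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Run converts Markdown to HTML with the default set of extensions.
	html := blackfriday.Run([]byte("# A heading\n\nSome *emphasized* text.\n"))
	fmt.Println(string(html))

	// SanitizedAnchorName implements the anchor-name algorithm described in
	// the package comment; it matches the IDs generated for headings when
	// the AutoHeadingIDs extension is enabled.
	fmt.Println(blackfriday.SanitizedAnchorName("A heading")) // prints: a-heading
}
```

By default Run enables the CommonExtensions set (tables, fenced code, autolinks, strikethrough, and others), which covers the block-level constructs handled by the vendored block.go above.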
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 0000000..a2c3edb --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, + "𝕂": 
true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, + "⪰": 
true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + "⊟": 
true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": true, + 
"fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + "↽": true, 
+ "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + "≤⃒": true, 
+ "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + "⊡": true, 
+ "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, + "⊢": 
true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go new file mode 100644 index 0000000..6ab6010 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -0,0 +1,70 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go new file mode 100644 index 0000000..cb4f26e --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -0,0 +1,952 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// HTMLFlags control optional behavior of HTML renderer. +type HTMLFlags int + +// HTML renderer configuration options. 
+const (
+	HTMLFlagsNone           HTMLFlags = 0
+	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                    // Skip embedded images
+	SkipLinks                                     // Skip all links
+	Safelink                                      // Only link to trusted protocols
+	NofollowLinks                                 // Only link with rel="nofollow"
+	NoreferrerLinks                               // Only link with rel="noreferrer"
+	NoopenerLinks                                 // Only link with rel="noopener"
+	HrefTargetBlank                               // Add a blank target
+	CompletePage                                  // Generate a complete HTML page
+	UseXHTML                                      // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
+	Smartypants                                   // Enable smart punctuation substitutions
+	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
+	TOC                                           // Generate a table of contents
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// <sup>[return]</sup> is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Heading ID, to ensure
+	// uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
+	// Negative offset is also valid.
+	// Resulting levels are clipped between 1 and 6.
+	HeadingLevelOffset int
+
+	Title string // Document title (used if CompletePage is set)
+	CSS   string // Optional CSS file URL (used if CompletePage is set)
+	Icon  string // Optional icon file URL (used if CompletePage is set)
+
+	Flags HTMLFlags // Flags allow customizing this renderer's behavior
+}
+
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewHTMLRenderer function.
+type HTMLRenderer struct {
+	HTMLRendererParameters
+
+	closeTag string // how to end singleton tags: either " />" or ">"
+
+	// Track heading IDs to prevent ID collision in a single generation.
+	headingIDs map[string]int
+
+	lastOutputLen int
+	disableTags   int
+
+	sr *SPRenderer
+}
+
+const (
+	xhtmlClose = " />"
+	htmlClose  = ">"
+)
+
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
+// satisfies the Renderer interface.
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
+	// configure the rendering engine
+	closeTag := htmlClose
+	if params.Flags&UseXHTML != 0 {
+		closeTag = xhtmlClose
+	}
+
+	if params.FootnoteReturnLinkContents == "" {
+		// U+FE0E is VARIATION SELECTOR-15.
+		// It suppresses automatic emoji presentation of the preceding
+		// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
+		params.FootnoteReturnLinkContents = "&#8617;\ufe0e"
+	}
+
+	return &HTMLRenderer{
+		HTMLRendererParameters: params,
+
+		closeTag:   closeTag,
+		headingIDs: make(map[string]int),
+
+		sr: NewSmartypantsRenderer(params.Flags),
+	}
+}
+
+func isHTMLTag(tag []byte, tagname string) bool {
+	found, _ := findHTMLTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle >= i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func skipSpace(tag []byte, i int) int {
+	for i < len(tag) && isspace(tag[i]) {
+		i++
+	}
+	return i
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// a tag begin with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// link begin with '/' but not '//', the second maybe a protocol relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory : begin with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory : begin with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
+
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	val := []string{}
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&NoopenerLinks != 0 {
+		val = append(val, "noopener")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, "target=\"_blank\"")
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+	pt := node.Parent.Type
+	return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+	w.Write(name)
+	if len(attrs) > 0 {
+		w.Write(spaceBytes)
+		w.Write([]byte(strings.Join(attrs, " ")))
+	}
+	w.Write(gtBytes)
+	r.lastOutputLen = 1
+}
+
+func footnoteRef(prefix string, node *Node) []byte {
+	urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
+
+func itemOpenCR(node *Node) bool {
+	if node.Prev == nil {
+		return false
+	}
+	ld := node.Parent.ListData
+	return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
+}
+
+func skipParagraphTags(node *Node) bool {
+	grandparent := node.Parent.Parent
+	if grandparent == nil || grandparent.Type != List {
+		return false
+	}
+	tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
+	return grandparent.Type == List && tightOrTerm
+}
+
+func cellAlignment(align CellAlignFlags) string {
+	switch align {
+	case TableAlignmentLeft:
+		return "left"
+	case TableAlignmentRight:
+		return "right"
+	case TableAlignmentCenter:
+		return "center"
+	default:
+		return ""
+	}
+}
+
+func (r *HTMLRenderer) out(w io.Writer, text []byte) {
+	if r.disableTags > 0 {
+		w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
+	} else {
+		w.Write(text)
+	}
+	r.lastOutputLen = len(text)
+}
+
+func (r *HTMLRenderer) cr(w io.Writer) {
+	if r.lastOutputLen > 0 {
+		r.out(w, nlBytes)
+	}
+}
+
+var (
+	nlBytes    = []byte{'\n'}
+	gtBytes    = []byte{'>'}
+	spaceBytes = []byte{' '}
+)
+
+var (
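+	// Pre-built byte slices for every HTML tag the renderer emits; writing
+	// these fixed slices avoids re-allocating the same small strings on
+	// every rendered node.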
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src=`)) + } + } + case Code: + r.out(w, codeTag) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + case Document: + break + case Paragraph: + if skipParagraphTags(node) { + break + } + if entering { + // TODO: untangle this clusterfuck about when the newlines need + // to be added and when not. + if node.Prev != nil { + switch node.Prev.Type { + case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule: + r.cr(w) + } + } + if node.Parent.Type == BlockQuote && node.Prev == nil { + r.cr(w) + } + r.out(w, pTag) + } else { + r.out(w, pCloseTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case BlockQuote: + if entering { + r.cr(w) + r.out(w, blockquoteTag) + } else { + r.out(w, blockquoteCloseTag) + r.cr(w) + } + case HTMLBlock: + if r.Flags&SkipHTML != 0 { + break + } + r.cr(w) + r.out(w, node.Literal) + r.cr(w) + case Heading: + headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level + openTag, closeTag := headingTagsFromLevel(headingLevel) + if entering { + if node.IsTitleblock { + attrs = append(attrs, `class="title"`) + } + if node.HeadingID != "" { + id := r.ensureUniqueHeadingID(node.HeadingID) + if r.HeadingIDPrefix != "" { + id = r.HeadingIDPrefix + id + } + if r.HeadingIDSuffix != "" { + id = id + r.HeadingIDSuffix + } + attrs = append(attrs, fmt.Sprintf(`id="%s"`, id)) + } + r.cr(w) + r.tag(w, openTag, attrs) + } else { + r.out(w, closeTag) + if !(node.Parent.Type == Item && node.Next == nil) { + r.cr(w) + } + } + case HorizontalRule: + r.cr(w) + r.outHRTag(w) + r.cr(w) + case List: + openTag := ulTag + closeTag := ulCloseTag + if node.ListFlags&ListTypeOrdered != 0 { + openTag = olTag + closeTag = olCloseTag + } + if node.ListFlags&ListTypeDefinition != 0 { + openTag = dlTag + closeTag = dlCloseTag + } + if entering { + if node.IsFootnotesList { + r.out(w, footnotesDivBytes) + r.outHRTag(w) + r.cr(w) + } + r.cr(w) + if node.Parent.Type == Item && node.Parent.Parent.Tight { + r.cr(w) + } + r.tag(w, openTag[:len(openTag)-1], attrs) + r.cr(w) + } else { + r.out(w, closeTag) + //cr(w) + //if node.parent.Type != Item { + // cr(w) + //} + if node.Parent.Type == Item && node.Next != nil { + r.cr(w) + } + if node.Parent.Type == Document || node.Parent.Type == BlockQuote { + r.cr(w) + } + if node.IsFootnotesList { + r.out(w, footnotesCloseDivBytes) + } + } + case Item: + openTag := liTag + closeTag := liCloseTag + if node.ListFlags&ListTypeDefinition != 0 { + openTag = ddTag + closeTag = ddCloseTag + } + if node.ListFlags&ListTypeTerm != 0 { + openTag = dtTag + closeTag = dtCloseTag + } + if entering { + if itemOpenCR(node) { + r.cr(w) + } + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug)) + break + } + r.out(w, openTag) + } else { + if node.ListData.RefLink != nil { + slug := slugify(node.ListData.RefLink) + if r.Flags&FootnoteReturnLinks != 0 { + r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug)) + } + } + r.out(w, closeTag) + r.cr(w) + } + case CodeBlock: + attrs = appendLanguageAttr(attrs, node.Info) + r.cr(w) + r.out(w, preTag) + r.tag(w, codeTag[:len(codeTag)-1], attrs) + escapeAllHTML(w, node.Literal) + r.out(w, codeCloseTag) + r.out(w, preCloseTag) + if node.Parent.Type != Item { + r.cr(w) + } + case Table: + if entering { + r.cr(w) + r.out(w, tableTag) + } else { + r.out(w, tableCloseTag) + r.cr(w) + } + case TableCell: + openTag := tdTag + closeTag := 
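+		// if this is a header cell, both tags switch to <th>/</th> below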
+		closeTag := tdCloseTag
+		if node.IsHeader {
+			openTag = thTag
+			closeTag = thCloseTag
+		}
+		if entering {
+			align := cellAlignment(node.Align)
+			if align != "" {
+				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+			}
+			if node.Prev == nil {
+				r.cr(w)
+			}
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case TableHead:
+		if entering {
+			r.cr(w)
+			r.out(w, theadTag)
+		} else {
+			r.out(w, theadCloseTag)
+			r.cr(w)
+		}
+	case TableBody:
+		if entering {
+			r.cr(w)
+			r.out(w, tbodyTag)
+			// XXX: this is to adhere to a rather silly test. Should fix test.
+			if node.FirstChild == nil {
+				r.cr(w)
+			}
+		} else {
+			r.out(w, tbodyCloseTag)
+			r.cr(w)
+		}
+	case TableRow:
+		if entering {
+			r.cr(w)
+			r.out(w, trTag)
+		} else {
+			r.out(w, trCloseTag)
+			r.cr(w)
+		}
+	default:
+		panic("Unknown node type " + node.Type.String())
+	}
+	return GoToNext
+}
+
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+	r.writeDocumentHeader(w)
+	if r.Flags&TOC != 0 {
+		r.writeTOC(w, ast)
+	}
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	io.WriteString(w, "\n</body>\n</html>\n")
+}
+
+func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	ending := ""
+	if r.Flags&UseXHTML != 0 {
+		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+		ending = " /"
+	} else {
+		io.WriteString(w, "<!DOCTYPE html>\n")
+		io.WriteString(w, "<html>\n")
+	}
+	io.WriteString(w, "<head>\n")
+	io.WriteString(w, "  <title>")
+	if r.Flags&Smartypants != 0 {
+		r.sr.Process(w, []byte(r.Title))
+	} else {
+		escapeHTML(w, []byte(r.Title))
+	}
+	io.WriteString(w, "</title>\n")
+	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+	io.WriteString(w, Version)
+	io.WriteString(w, "\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	io.WriteString(w, "  <meta charset=\"utf-8\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	if r.CSS != "" {
+		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+		escapeHTML(w, []byte(r.CSS))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	if r.Icon != "" {
+		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
+		escapeHTML(w, []byte(r.Icon))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	io.WriteString(w, "</head>\n")
+	io.WriteString(w, "<body>\n\n")
+}
+
+func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
+	buf := bytes.Buffer{}
+
+	inHeading := false
+	tocLevel := 0
+	headingCount := 0
+
+	ast.Walk(func(node *Node, entering bool) WalkStatus {
+		if node.Type == Heading && !node.HeadingData.IsTitleblock {
+			inHeading = entering
+			if entering {
+				node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
+				if node.Level == tocLevel {
+					buf.WriteString("</li>\n\n<li>")
+				} else if node.Level < tocLevel {
+					for node.Level < tocLevel {
+						tocLevel--
+						buf.WriteString("</li>\n</ul>")
+					}
+					buf.WriteString("\n\n<li>")
+				} else {
+					for node.Level > tocLevel {
+						tocLevel++
+						buf.WriteString("\n<ul>\n<li>")
+					}
+				}
+
+				fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
+				headingCount++
+			} else {
+				buf.WriteString("</a>")
+			}
+			return GoToNext
+		}
+
+		if inHeading {
+			return r.RenderNode(&buf, node, entering)
+		}
+
+		return GoToNext
+	})
+
+	for ; tocLevel > 0; tocLevel-- {
+		buf.WriteString("</li>\n</ul>")
+	}
+
+	if buf.Len() > 0 {
+		io.WriteString(w, "<nav>\n")
+		w.Write(buf.Bytes())
+		io.WriteString(w, "\n\n</nav>\n")
+	}
+	r.lastOutputLen = buf.Len()
+}
diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go
new file mode 100644
index 0000000..d45bd94
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/inline.go
@@ -0,0 +1,1228 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse inline elements.
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+var (
+	urlRe    = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+`
+	anchorRe = regexp.MustCompile(`^(<a\shref="` + urlRe + `"(\stitle="[^"<>]+")?\s?>` + urlRe + `<\/a>)`)
+
+	// https://www.w3.org/TR/html5/syntax.html#character-references
+	// highest unicode code point in 17 planes (2^20): 1,114,112d =
+	// 7 dec digits or 6 hex digits
+	// named entity references can be 2-31 characters with stuff like &lt;
+	// at one end and &CounterClockwiseContourIntegral; at the other. There
+	// are also sometimes numbers at the end, although this isn't inherent
+	// in the specification; there are never numbers anywhere else in
+	// current character references, though; see &frac34; and &blk12;, etc.
+	// https://www.w3.org/TR/html5/syntax.html#named-character-references
+	//
+	// entity := "&" (named group | number ref) ";"
+	// named group := [a-zA-Z]{2,31}[0-9]{0,2}
+	// number ref := "#" (dec ref | hex ref)
+	// dec ref := [0-9]{1,7}
+	// hex ref := ("x" | "X") [0-9a-fA-F]{1,6}
+	htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`)
+)
+
+// Functions to parse text within a block
+// Each function returns the number of chars taken care of
+// data is the complete block being rendered
+// offset is the number of valid chars before the current cursor
+
+func (p *Markdown) inline(currBlock *Node, data []byte) {
+	// handlers might call us recursively: enforce a maximum depth
+	if p.nesting >= p.maxNesting || len(data) == 0 {
+		return
+	}
+	p.nesting++
+	beg, end := 0, 0
+	for end < len(data) {
+		handler := p.inlineCallback[data[end]]
+		if handler != nil {
+			if consumed, node := handler(p, data, end); consumed == 0 {
+				// No action from the callback.
+				end++
+			} else {
+				// Copy inactive chars into the output.
+				currBlock.AppendChild(text(data[beg:end]))
+				if node != nil {
+					currBlock.AppendChild(node)
+				}
+				// Skip past whatever the callback used.
+				beg = end + consumed
+				end = beg
+			}
+		} else {
+			end++
+		}
+	}
+	if beg < len(data) {
+		if data[end-1] == '\n' {
+			end--
+		}
+		currBlock.AppendChild(text(data[beg:end]))
+	}
+	p.nesting--
+}
+
+// single and double emphasis parsing
+func emphasis(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+	c := data[0]
+
+	if len(data) > 2 && data[1] != c {
+		// whitespace cannot follow an opening emphasis;
+		// strikethrough only takes two characters '~~'
+		if c == '~' || isspace(data[1]) {
+			return 0, nil
+		}
+		ret, node := helperEmphasis(p, data[1:], c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 1, node
+	}
+
+	if len(data) > 3 && data[1] == c && data[2] != c {
+		if isspace(data[2]) {
+			return 0, nil
+		}
+		ret, node := helperDoubleEmphasis(p, data[2:], c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 2, node
+	}
+
+	if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c {
+		if c == '~' || isspace(data[3]) {
+			return 0, nil
+		}
+		ret, node := helperTripleEmphasis(p, data, 3, c)
+		if ret == 0 {
+			return 0, nil
+		}
+
+		return ret + 3, node
+	}
+
+	return 0, nil
+}
+
+func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	nb := 0
+
+	// count the number of backticks in the delimiter
+	for nb < len(data) && data[nb] == '`' {
+		nb++
+	}
+
+	// find the next delimiter
+	i, end := 0, 0
+	for end = nb; end < len(data) && i < nb; end++ {
+		if data[end] == '`' {
+			i++
+		} else {
+			i = 0
+		}
+	}
+
+	// no matching delimiter?
+	if i < nb && end >= len(data) {
+		return 0, nil
+	}
+
+	// trim outside whitespace
+	fBegin := nb
+	for fBegin < end && data[fBegin] == ' ' {
+		fBegin++
+	}
+
+	fEnd := end - nb
+	for fEnd > fBegin && data[fEnd-1] == ' ' {
+		fEnd--
+	}
+
+	// render the code span
+	if fBegin != fEnd {
+		code := NewNode(Code)
+		code.Literal = data[fBegin:fEnd]
+		return end, code
+	}
+
+	return end, nil
+}
+
+// newline preceded by two spaces becomes <br>
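+// (for example, "roses are red  \nviolets are blue" produces a Hardbreak
+// node between the two lines)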
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
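+				// an inline footnote without usable link text falls back to
+				// the generated slug "footnote-N"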
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
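+					// bare e-mail autolinks get an explicit mailto: scheme
+					// prepended to the destination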
+				}
+				node.AppendChild(text(stripMailto(link)))
+				return end, node
+			}
+		} else {
+			htmlTag := NewNode(HTMLSpan)
+			htmlTag.Literal = data[:end]
+			return end, htmlTag
+		}
+	}
+
+	return end, nil
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	if len(data) > 1 {
+		if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
+			return 2, NewNode(Hardbreak)
+		}
+		if bytes.IndexByte(escapeChars, data[1]) < 0 {
+			return 0, nil
+		}
+
+		return 2, text(data[1:2])
+	}
+
+	return 2, nil
+}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	end := 1
+
+	if end < len(data) && data[end] == '#' {
+		end++
+	}
+
+	for end < len(data) && isalnum(data[end]) {
+		end++
+	}
+
+	if end < len(data) && data[end] == ';' {
+		end++ // real entity
+	} else {
+		return 0, nil // lone '&'
+	}
+
+	ent := data[:end]
+	// undo &amp; escaping or it will be converted to &amp;amp; by another
+	// escaper in the renderer
+	if bytes.Equal(ent, []byte("&amp;")) {
+		ent = []byte{'&'}
+	}
+
+	return end, text(ent)
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
+// hasPrefixCaseInsensitive is a custom implementation of
+// strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
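+// (it reports true, for example, for s = "HTTPS://example.com" and
+// prefix = "https://")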
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 0000000..58d2e45 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
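+// For example, blackfriday.Run(input, blackfriday.WithExtensions(CommonExtensions|Footnotes))
+// parses with the common set plus Pandoc-style footnotes.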
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+//     <p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
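+//
+// A rough illustration (assumed input, not taken from the package's tests):
+// for the footnote definition
+//
+//	[^note]: first line
+//	    an indented continuation line
+//
+// contents ends up holding both lines with one level of indentation stripped,
+// and hasBlock is true because an indented block follows the first line.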
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 0000000..04e6050 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
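+//
+// A minimal sketch (assuming ast is an already-parsed *Node): counting the
+// paragraphs of a document:
+//
+//	paragraphs := 0
+//	ast.Walk(func(node *Node, entering bool) WalkStatus {
+//		if entering && node.Type == Paragraph {
+//			paragraphs++
+//		}
+//		return GoToNext
+//	})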
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 0000000..3a220e9 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
+		// ] is probably a close
+		*isOpen = false
+	case /* isnormal(previousChar) && */ isspace(nextChar):
+		// [a" ] this is one of the easy cases
+		*isOpen = false
+	case previousChar == 0 && ispunct(nextChar):
+		// ["!] hmm... could be ["$1.95] or ["!...]
+		*isOpen = false
+	case isspace(previousChar) && ispunct(nextChar):
+		// [ "!] looks more like [ "$1.95]
+		*isOpen = true
+	case ispunct(previousChar) && ispunct(nextChar):
+		// [!"!] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case /* isnormal(previousChar) && */ ispunct(nextChar):
+		// [a"!] is probably a close
+		*isOpen = false
+	case previousChar == 0 /* && isnormal(nextChar) */ :
+		// ["a] is probably an open
+		*isOpen = true
+	case isspace(previousChar) /* && isnormal(nextChar) */ :
+		// [ "a] this is one of the easy cases
+		*isOpen = true
+	case ispunct(previousChar) /* && isnormal(nextChar) */ :
+		// [!"a] is probably an open
+		*isOpen = true
+	default:
+		// [a'b] maybe a contraction?
+		*isOpen = false
+	}
+
+	// Note that with the limited lookahead, this non-breaking
+	// space will also be appended to single double quotes.
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
+	}
+}
+
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		// and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
+}
+
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
+}
+
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
+
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
+	var (
+		r SPRenderer
+
+		smartAmpAngled      = r.smartAmp(true, false)
+		smartAmpAngledNBSP  = r.smartAmp(true, true)
+		smartAmpRegular     = r.smartAmp(false, false)
+		smartAmpRegularNBSP = r.smartAmp(false, true)
+
+		addNBSP = flags&SmartypantsQuotesNBSP != 0
+	)
+
+	if flags&SmartypantsAngledQuotes == 0 {
+		r.callbacks['"'] = r.smartDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpRegular
+		} else {
+			r.callbacks['&'] = smartAmpRegularNBSP
+		}
+	} else {
+		r.callbacks['"'] = r.smartAngledDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpAngled
+		} else {
+			r.callbacks['&'] = smartAmpAngledNBSP
+		}
+	}
+	r.callbacks['\''] = r.smartSingleQuote
+	r.callbacks['('] = r.smartParens
+	if flags&SmartypantsDashes != 0 {
+		if flags&SmartypantsLatexDashes == 0 {
+			r.callbacks['-'] = r.smartDash
+		} else {
+			r.callbacks['-'] = r.smartDashLatex
+		}
+	}
+	r.callbacks['.'] = r.smartPeriod
+	if flags&SmartypantsFractions == 0 {
+		r.callbacks['1'] = r.smartNumber
+		r.callbacks['3'] = r.smartNumber
+	} else {
+		for ch := '1'; ch <= '9'; ch++ {
+			r.callbacks[ch] = r.smartNumberGeneric
+		}
+	}
+	r.callbacks['<'] = r.smartLeftAngle
+	r.callbacks['`'] = r.smartBacktick
+	return &r
+}
+
+// Process is the entry point of the Smartypants renderer.
+func (r *SPRenderer) Process(w io.Writer, text []byte) {
+	mark := 0
+	for i := 0; i < len(text); i++ {
+		if action := r.callbacks[text[i]]; action != nil {
+			if i > mark {
+				w.Write(text[mark:i])
+			}
+			previousChar := byte(0)
+			if i > 0 {
+				previousChar = text[i-1]
+			}
+			var tmp bytes.Buffer
+			i += action(&tmp, previousChar, text[i:])
+			w.Write(tmp.Bytes())
+			mark = i + 1
+		}
+	}
+	if mark < len(text) {
+		w.Write(text[mark:])
+	}
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 0000000..58e0c37
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,3 @@
+# github.com/russross/blackfriday/v2 v2.1.0
+## explicit
+github.com/russross/blackfriday/v2
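
A quick sketch of how the new engine can drive the vendored parser (the
helper name and the chosen extension set are illustrative assumptions, not
part of this patch):

	package main

	import "github.com/russross/blackfriday/v2"

	// renderPost converts a single markdown post to HTML.
	func renderPost(src []byte) []byte {
		return blackfriday.Run(src,
			blackfriday.WithExtensions(blackfriday.CommonExtensions|blackfriday.Footnotes))
	}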