
update to new generator

Gibheer 2022-03-25 14:41:22 +01:00
parent 953b419485
commit d58ebeab52
79 changed files with 9824 additions and 1346 deletions


@@ -13,8 +13,11 @@ FILEMODE = 444
all: clean build
+dev:
+	go run main.go --content-dir content --template-dir templates --static-dir static --listen "127.0.0.1:8080"
build:
-	hugo
+	go run main.go --content-dir content --template-dir templates --static-dir static --output-dir $(HTTPDIR)
clean:
	-rm -r public/*


@@ -1,11 +0,0 @@
baseurl = ""
languageCode = "en-us"
title = "zero-knowledge"
theme = "zero"
SectionPagesMenu = "main"
Paginate = 12
disableRSS = false
[taxonomies]
author = "author"
tag = "tags"


@@ -1,11 +0,0 @@
+++
date = "2015-10-11T20:00:29+02:00"
draft = true
title = "about"
+++
## about zero-knowledge
This blog is the personal blog of Gibheer and Stormwind, where we write about
any topic from IT which keeps us working at the moment.


@@ -1,6 +1,7 @@
+++
title = "Gibheer"
date = "2015-11-04T12:23:00+02:00"
+url = "/author/Gibheer"
+++
## about me
@@ -17,8 +18,8 @@ learn from it and try it another way next time.
Most of the stuff I try in private is online, either on github or my own git
server. What isn't code, I try to write down on the blog.
-As for social media, I'm on [freenode](irc://irc.freenode.org/) under the nick
-Gibheer.
+As for social media, I'm on [libera.chat](ircs://irc.libera.chat:6697) with the nick
+'Gibheer'.
## links


@@ -1,6 +1,7 @@
+++
title = "Stormwind"
date = "2015-11-04T12:40:00+02:00"
+url = "/author/Stormwind"
+++
introduction

content/index.md Normal file

@@ -0,0 +1,135 @@
+++
title = "blog"
author = "gibheer"
url = "/"
template = "index.html"
+++
This blog is maintained by [Gibheer](/author/Gibheer) and [Stormwind](/author/Stormwind)
and covers various topics.
* [link summary 2016/07/08](post/127.md)
* [poudriere in jails with zfs](post/126.md)
* [gotchas with IPs and Jails](post/125.md)
* [link summary 2016/04/09](post/124.md)
* [json/curl to go](post/123.md)
* [configuring raids on freebsd](post/122.md)
* [fast application locks](post/121.md)
* [new blog engine](post/120.md)
* [ssh certificates part 2](post/119.md)
* [ssh certificates part 1](post/118.md)
* [S.M.A.R.T. values](post/117.md)
* [minimal nginx configuration](post/115.md)
* [pgstats - vmstat like stats for postgres](post/114.md)
* [setting zpool features](post/113.md)
* [using unbound and dnsmasq](post/112.md)
* [common table expressions in postgres](post/111.md)
* [range types in postgres](post/110.md)
* [learning the ansible way](post/109.md)
* [playing with go](post/108.md)
* [no cfengine anymore](post/107.md)
* [scan to samba share with HP Officejet pro 8600](post/106.md)
* [\[cfengine\] log to syslog](post/105.md)
* [overhaul of the blog](post/104.md)
* [block mails for unknown users](post/103.md)
* [choosing a firewall on freebsd](post/102.md)
* [use dovecot to store mails with lmtp](post/100.md)
* [grub can't read zpool](post/99.md)
* [sysidcfg replacement on omnios](post/98.md)
* [filter program logs in freebsd syslog](post/97.md)
* [moving a zone between zpools](post/96.md)
* [compile errors on omnios with llvm](post/95.md)
* [inner and natural joins](post/94.md)
* [release of zero 0.1.0](post/93.md)
* [building a multi instance postgres systemd service](post/92.md)
* [automatic locking of the screen](post/91.md)
* [rotate log files with logadm](post/90.md)
* [Solaris SMF on linux with systemd](post/89.md)
* [create encrypted password for postgresql](post/88.md)
* [extend PATH in Makefile](post/87.md)
* [touchpad keeps scrolling](post/86.md)
* [Schwarze Seelen brauchen bunte Socken 2012.1](post/85.md)
* [Backups with ZFS over the wire](post/84.md)
* [the Illumos eco system](post/83.md)
* [archlinux + rubygems = gem executables will not run](post/82.md)
* [Lustige Gehversuche mit... verschlüsselten Festplatten](post/81.md)
* [find cycle detected](post/80.md)
* [openindiana - getting rubinius to work](post/79.md)
* [openindiana - curl CA failure](post/78.md)
* [openindiana - set up ssh with kerberos authentication](post/77.md)
* [great resource to ipfilter](post/76.md)
* [openindiana - ntpd does not start](post/75.md)
* [openindiana - how to configure a zone](post/74.md)
* [openindiana - how to get routing working](post/73.md)
* [How to use sysidcfg for zone deployment](post/72.md)
* [set environment variables in smf manifests](post/71.md)
* [get pfexec back in Solaris](post/70.md)
* [Solaris - a new way to 'ifconfig'](post/69.md)
* [OpenIndiana 151a released](post/68.md)
* [PostgreSQL 9.1 was released](post/67.md)
* [SmartOS - hype and a demo iso](post/66.md)
* [SmartOS - a new Solaris](post/65.md)
* [neues Lebenszeichen - neuer Blog](post/64.md)
* [Accesslogs in die Datenbank](post/63.md)
* [Schwarze Seelen brauchen bunte Socken - Teil 3](post/62.md)
* [Technik hinter dem neuen Blog](post/61.md)
* [jede Menge Umzuege](post/60.md)
* [DTrace fuer den Linuxlator in FreeBSD](post/59.md)
* [daily zfs snapshots](post/58.md)
* [Dokumentation in Textile schreiben](post/57.md)
* [Shells in anderen Sprachen](post/56.md)
* [ZFS Versionen](post/55.md)
* [Spielwahn mit Wasser](post/54.md)
* [FreeBSD Status Report Juli - September 2010](post/53.md)
* [Spass mit test-driven development](post/52.md)
* [dtrace userland in FreeBSD head](post/51.md)
* [Alle Tabellen einer DB loeschen mit PostgreSQL 9.0](post/50.md)
* [Shellbefehle im Vim ausfuehren](post/49.md)
* [zero-knowledge mit IPv6 Teil 2](post/48.md)
* [\[Rubyconf 2009\] Worst Ideas Ever](post/47.md)
* [Nachfolger von Tex](post/46.md)
* [Linux und Windows im Auto](post/45.md)
* [zero-knowledge jetzt auch per IPv6](post/44.md)
* [Der Drackenzackenschal](post/43.md)
* [Kalender auf der Konsole](post/42.md)
* [NetBeans 6.9 released](post/41.md)
* [Das Wollefest in Nierstein](post/40.md)
* [PostgreSQL - mehrere Werte aus einer Funktion](post/39.md)
* [Schwarze Seelen brauchen bunte Socken - Teil 2](post/38.md)
* [Serverumzug vollendet](post/37.md)
* [MySQL kann Datensaetze \"zerreissen\"](post/36.md)
* [Umzug mit OpenSolaris 20x0.xx](post/35.md)
* [Blub gibt es ab sofort auch fuer unterwegs](post/34.md)
* [OpenSolaris Zones mit statischer IP](post/33.md)
* [Blog nicht da](post/32.md)
* [gefaehrliches Spiel fuer das n900](post/31.md)
* [neuer CLI-Client fuer XMMS2](post/30.md)
* [Claws Mail laeuft auf OpenSolaris](post/29.md)
* [publisher contains only packages from other publisher](post/28.md)
* [PostgreSQL 8.4 in OpenSolaris](post/27.md)
* [mit PHP Mailadressen validieren](post/26.md)
* [Lustige Gehversuche mit ...](post/25.md)
* [Performance, Programme und viel Musik](post/24.md)
* [von Linux zu OpenSolaris](post/23.md)
* [Gibheers zsh-config](post/22.md)
* [Crossbow mit Solaris Containern](post/21.md)
* [Lustige Gehversuche mit Gentoo/FreeBSD](post/20.md)
* [Heidelbeertigerarmstulpen](post/19.md)
* [OpenVPN unter OpenSolaris](post/18.md)
* [OpenSolaris Wiki](post/17.md)
* [OpenSolaris ohne Reboot updaten](post/16.md)
* [einzelne Pakete unter OpenSolaris updaten](post/15.md)
* [Rails mit Problemen unter OpenSolaris](post/14.md)
* [Wie wenig braucht OpenSolaris?](post/13.md)
* [das eklige Gesicht XMLs](post/12.md)
* [Dokumentation fuer (Open)Solaris](post/11.md)
* [Woche der Updates](post/10.md)
* [Was ist XMMS2?](post/9.md)
* [Rack und XMMS2](post/8.md)
* [Webserver unter Ruby](post/7.md)
* [Symbole in Ruby](post/6.md)
* [Schwarze Seelen brauchen bunte Socken](post/5.md)
* [Zero-knowledge spielt wieder Icewars](post/4.md)
* [Serendipity als Blog?](post/3.md)
* [Indizes statt Tabellen](post/2.md)
* [zero-knowledge ohne Forum](post/1.md)


@@ -36,8 +36,8 @@ well, now I absolutely need lots of wool.
Thanks again to Nathalie and her mother, who both ran the
workshop. It was really fun and I think I
will keep spinning a lot in the future. :)\
-!(float\_right)/images/wolle4.jpg(4 Knaeule bunte Wolle vom
-Wolldrachen)!\
+![4 Knaeule bunte Wolle vom Wolldrachen](/static/pics/wolle4.jpg)
Furthermore, I have to mention that the
[Wolldrache](http://drachenwolle.de/) was also here with her
stand. And, rather meanly, right at the start of the festival grounds.


@@ -5,10 +5,10 @@ author = "Gibheer"
draft = false
+++
After a long silence here, there is finally an update again. In the meantime we have moved the blog to our own software, because Jekyll did not suit us. For me it was easy to write posts from the console, but there was no way to "quickly" write something while on the road.
Now we have our own blog software (which is also on github). Let's see how well we get along with it. In contrast to jekyll we do not generate static files; the content is stored in the database and regenerated on every request. That is still a bit slow at the moment, but I will build something to make it fit better.
A comment function will be added, and it is planned to support different types of blog posts. The former will probably be quite easy; the latter is currently only a rough idea in my head.
In any case it is a nice experiment, and we will see how it develops in the future.


@@ -5,16 +5,16 @@ author = "Gibheer"
draft = false
+++
Some minutes ago I saw on [hacker news](http://news.ycombinator.com/) the following line: [Joyent Open Sources SmartOS: Zones, ZFS, DTrace and KVM (smartos.org)](http://smartos.org/).
Who is behind SmartOS?
======================
What does that mean? I took a look, and it seems that Joyent, the company behind [node.js](http://nodejs.org/), has released their own distribution of [Illumos](https://www.illumos.org/).
After the takeover of Sun by Oracle, OpenSolaris as a project was closed in favor of Solaris 11. As OpenSolaris was open source, the Illumos project emerged from its remains, but until now there had been no release of the Illumos kernel in any project.
So what is different?
=====================
The first things I saw on their page are dtrace, zfs and zones. So it's a standard solaris. But there is more: *KVM*! If the existence of zones also means that it has crossbow and resource limits, it would be absolutely gorgeous! It would be possible to build the core services on solaris zones and, on top of that, multiple dev or production machines with linux, windows or whatever you want.
I will test it first in a virtual box to see how stable and usable it really is, as there is no documentation on the website yet. After my test I will report back.


@@ -5,12 +5,12 @@ author = "Gibheer"
draft = false
+++
So, there is this new distribution of Illumos, [SmartOS](http://smartos.org), but it's not as ready as they claimed. Sure, there is an ISO, but that ISO has no installer and no package manager. So one of the crucial parts for using SmartOS is missing.
As Joyent wrote on the [blog](http://blog.smartos.org), they are working on a wiki and the documentation, and this night they showed the [wiki](http://wiki.smartos.org). Until now there is only documentation on how to use the usb image, which got released at the same time. But I think that there will be much more coming.
At the same time I found out that kvm was released into the Illumos core too, so kvm will be available in every other distribution as well. And [OpenIndiana](http://openindiana.org) said they want it in their 151 release too. 151 was planned to be released some months ago, so let's see how fast they can get that out to the users.
Joyent should release a real distribution as fast as they can, because they created a large hype around SmartOS but have nothing to use in production. The ports are missing, and an upgrade path is missing too. They wrote that they are already using it in production, so why did they not release that?
Illumos, OpenIndiana and Joyent with SmartOS are missing a big chance here to make this fork of OpenSolaris popular. They created much traction, but without having something which could be used in production. We will see how fast they can react. Hopefully the release of either OpenIndiana or SmartOS will be usable and stable in production. Then they have a chance of getting me as a user.


@@ -5,6 +5,6 @@ author = "Gibheer"
draft = false
+++
Yesterday PostgreSQL 9.1 was released. It has some neat features included, like writable common table expressions, synchronous replication and unlogged tables. Apart from that, some performance tuning was included as well.
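A writable CTE, for example, lets a single statement move rows from one table to another. A minimal sketch via psql - the tables `tasks` and `tasks_archive` are made up for illustration:
```
psql -c "WITH moved AS (DELETE FROM tasks WHERE done RETURNING *)
         INSERT INTO tasks_archive SELECT * FROM moved;"
```
The DELETE runs inside the WITH clause, and its RETURNING rows feed the INSERT, all in one statement.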
If you are interested, take a look yourself at the [release notes](http://www.postgresql.org/about/news.1349)


@@ -5,10 +5,10 @@ author = "Gibheer"
draft = false
+++
After the release of [PostgreSQL 9.1](http://www.postgresql.org/about/news.1349), today another great open source project released a new version - [OpenIndiana](http://wiki.openindiana.org/oi/oi_151a+Release+Notes).
OpenIndiana is based on a fork of OpenSolaris, named [Illumos](http://illumos.org), which was announced in august 2010. OpenIndiana has evolved since that time, got a stable release 148 and today 151a. That release is very solid and has one thing which Solaris 11 does not have and most likely never will: *KVM*.
So from today you get a Solaris fork with crossbow, resource containers, zones and the kernel virtual machine, ported from linux to Illumos by the developers of [Joyent](http://joyent.com). They built their own distribution, [SmartOS](http://smartos.org), which is a bootable OS for managing a cloud-like setup, but without the zones.
So if you have a large infrastructure and want to separate some programs from each other, or have some old infrastructure, try OpenIndiana with its zones and kvm.


@@ -5,8 +5,8 @@ author = "Gibheer"
draft = true
+++
Small pointers on ipadm (see the sketch after the links):
http://192.9.164.72/bin/view/Project+brussels/ifconfig_ipadm_feature_mapping
http://arc.opensolaris.org/caselog/PSARC/2010/080/materials/ipadm.1m.txt
http://blog.allanglesit.com/2011/03/solaris-11-network-configuration-basics/
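As a quick taste of what those links cover, the new-style commands look roughly like this - interface name and address are placeholders, not taken from the notes above:
```
# show configured interfaces and addresses
ipadm show-if
ipadm show-addr
# assign a static address to an interface
ipadm create-addr -T static -a 192.0.2.10/24 e1000g0/v4
```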


@@ -5,13 +5,13 @@ author = "Gibheer"
draft = false
+++
If you tried Solaris 11 or OpenIndiana in a fresh installation, you may have noticed that pfexec does not work the way you are used to. I asked in #openindiana on `irc.freenode.org` and was told that the behavior was changed. OpenSolaris used to have a `Primary Administrator` profile which got assigned to the first account created on the installation. The problem with that is the same as on Windows - you are doing everything with the administrator or root account. To avoid that, sudo was introduced, which with the default settings asks for the password of your account. Both tools are very different in what they do and what they are good at, so it's up to the administrator to define secure roles where appropriate and use sudo rules for the parts which have to be more secured.
If you want the old behavior back, these two steps should be enough. But keep in mind that it is important to secure your system, to avoid misuse.
* there should be a line like the following in `/etc/security/prof_attr`
  `Primary Administrator:::Can perform all administrative tasks:auths=solaris.*,solaris.grant;help=RtPriAdmin.html`
* if there is, then you can add that profile to your user with
  `usermod -P 'Primary Administrator' <username>`
It is possible to combine these two mechanics too. You could build a zone to ssh into the box with a key, and from there ssh with sudo and a password into the internal systems.
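With the profile assigned, commands can be run with elevated rights through pfexec again. A quick sanity check could look like this (the pkg command is only an example):
```
# list the rights profiles assigned to the current user
profiles
# run a privileged command via the profile
pfexec pkg update
```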


@@ -5,16 +5,16 @@ author = "Gibheer"
draft = false
+++
If you need to set an environment variable for an smf service, you are looking for envvar. It gets set in the `service` scope or in the `exec_method` scope. Here is a small example of how it's used.
```
<exec_method type="method" name="start" exec="/bin/bash">
  <method_context>
    <method_environment>
      <envvar name="FOO" value="bar" />
    </method_environment>
  </method_context>
</exec_method>
```
This example sets the environment variable `FOO` to bar. This is especially useful when you have to modify `PATH` or `LD_LIBRARY_PATH`. Just don't forget that you did it.


@@ -5,28 +5,28 @@ author = "Gibheer"
draft = false
+++
This is mostly for myself, so that I can remember how to use the least documented feature of Solaris and openindiana - the `sysidcfg` files.
These files help deploying new zones faster, as you don't have to configure them by hand afterwards. But what is the syntax and how can you use them?
Here is an example file:
    name_service=NONE
    # name_service=DNS {domain_name=<your_domain> name_server=<your_dns_server>}
    nfs4_domain=dynamic
    timezone=Europe/Stockholm
    terminal=xterms
    root_password=<crypted_password>
    security_policy=NONE
    network_interface=<interface1> {primary hostname=<hostname> default_route=<route_ip> ip_address=<if_ip> netmask=<if_netmask> protocol_ipv6=yes}
    network_interface=<interface2> {hostname=<hostname> ip_address=<if_ip> netmask=<if_netmask> protocol_ipv6=yes default_route=NONE}
The most important thing first: you don't need system_locale after openindiana 151 anymore. If you have it in your config, even with C, delete it, or else the setup will not work!
If you don't have a dns record for your zone yet, set `name_service` to NONE. If you already have a record set, use the commented syntax.
The next interesting setting is root_password. Here you don't input the password in cleartext, but crypted. I wrote a little script to generate this string. You can find the code [here](https://github.com/Gibheer/zero-pwcrypter).
The network_interface part is pretty easy, if you take these lines as a template. If you have only one interface, you can name the first interface PRIMARY. That way, you have a bit less to write.
That's all so far. I will update this post when I have figured out what to fill into nfs4_domain and security_policy.


@@ -5,62 +5,62 @@ author = "Gibheer"
draft = false
+++
This time, we are going to get routing working on the global zone for our other zones. You can replace the global zone with another zone, as the setup is the same.
What's needed?
==============
First, we need to install ipfilter, if it isn't already installed. To do that, just invoke
    # pkg install ipfilter
This will install the packet filter and NAT engine. The latter is the part we want to use now.
We will assume that the global zone has two interfaces with the following setup:
* bge0 -> 192.168.4.1/24
* bge1 -> 192.168.5.1/24
configure ipnat
===============
With `ipnat` installed, we need to write a small configuration. For this example, we set up routing for every machine in the subnet.
For that, open the file `/etc/ipf/ipnat.conf` and write the following lines:
    map bge0 192.168.5.0/24 -> 0/32 portmap tcp/udp auto
    map bge0 192.168.5.0/24 -> 0/32
These two lines say that all packets from the subnet to the rest shall be rewritten and forwarded.
After that, all we need to do is enable the ipfilter and the routing daemons with the following commands.
    # svcadm enable ipfilter
    # routeadm -e ipv4-forwarding
    # routeadm -e ipv4-routing
    # routeadm -u
The last command checks whether all daemons are running according to the settings. To see which settings are set and what the daemons are doing, run the `routeadm` command without any arguments.
configure the zone
==================
Now we fire up the zone to test whether we get anywhere near routing. In our case, the zone only has one interface, so it detects the router itself per icmp.
We can prove that very easily with
    # netstat -rn
The default gateway should point to our global zone. As a last test, you can ping an ip in another subnet. If the global zone says this host is alive, the zone should too.
A good IP to test is 8.8.8.8, as it is really easy to remember.
That was all. Have fun with your access.
links and hints
===============
You can find some more documentation on ipfilter and routing in the man pages of ipnat, ipf and routeadm. Some example rule sets for ipf can be found in `/usr/share/ipfilter/examples/nat.eg`.
* [a rough setup of routing](http://blog.kevinvandervlist.nl/2011/06/openindiana-zone-with-nat/)
* [NAT on solaris](http://www.rite-group.com/rich/solaris_nat.html)


@@ -5,93 +5,93 @@ author = "Gibheer"
draft = false
+++
In this short post, we will get a container running on an openindiana host. We will do some things in crossbow, but most of the following stuff is just configuring the zone. At the end of this blog post, you will find some links to related pages.
some preparations
=================
Make sure that you have a free vnic created with dladm to use in the zone, or else we will have no network available. Further, we need a place on the filesystem where our zone can be created. We need 500MB to 1.5GB of free space.
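If the vnic does not exist yet, it can be created up front. A minimal sketch - the physical link `e1000g0` is a placeholder, the vnic name matches the zone configuration below:
```
# create a virtual nic on top of the physical link
dladm create-vnic -l e1000g0 zone1
# verify that it shows up
dladm show-vnic
```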
writing a zone configuration
============================
In the first step, we have to write a zone configuration. You can use zonecfg directly, but it's better to write it into a textfile and let zonecfg read that file. That way, you can check the configuration into a vcs of your choice.
The config should look like this.
    create -b
    set zonepath=/zones/zone1
    set ip-type=exclusive
    set autoboot=false
    add net
    set physical=zone1
    end
    commit
With this configuration, we build a zone which gets saved in `/zones`. `/zones` has to be a zfs partition, or else the zone can not be created.
The sixth line sets the network device for the zone to the vnic `zone1`.
Now we feed the file to zonecfg and let it create *zone1*.
    # zonecfg -z zone1 -f zone1.conf
installation of the zone
========================
The next step is to install the zone with the command:
    # zoneadm -z zone1 install
or clone it from a template with
    # zoneadm -z zone1 clone template_name
Now we have to wait a bit and can write the next configuration file.
writing a sysidcfg
==================
I already wrote a rough post about the [sysidcfg](http://zero-knowledge.org/post/72), so take a look there if you are interested in further details.
For this example, we use the following content.
    name_service=NONE
    nfs4_domain=dynamic
    terminal=xterms
    # the password is foobar
    root_password=0WMBUdFzAu6qU
    security_policy=NONE
    network_interface=zone1 {
        primary
        hostname=zone1
        default_route=NONE
        ip_address=192.168.5.3
        netmask=255.255.255.0
        protocol_ipv6=no
    }
booting the zone
================
When the installation process has ended, copy the file to `/zones/zone1/root/etc/sysidcfg`. This way, the zone can read the file on the first boot and set most of the stuff.
    # zoneadm -z zone1 boot
To check whether everything gets configured, log into the zone and watch the output.
    # zlogin -e ! -C zone1
It will take some time until the zone is ready to use, but it should not ask for further details. When the prompt shows, the configuration is complete.
Now you can log into the zone and make further adjustments. Some topics will get their own blog entries here, so take a look at the other entries for help too.
links
=====
Here are some links with further details on this topic:
* [crossbow example from c0t0d0s0](http://www.c0t0d0s0.org/archives/5355-Upcoming-Solaris-Features-Crossbow-Part-1-Virtualisation.html)
* [howto sysidcfg](http://zero-knowledge.org/post/72)


@@ -5,8 +5,8 @@ author = "Gibheer"
draft = true
+++
Here comes a small hint for everybody who wants to run an ntp server in a zone: it does not work!
The reason is that ntp needs access to the time facility of the kernel, but only the global zone is allowed to access that part of the kernel. Don't worry though - you don't need an ntp client in the zones, as they get their time information from the global zone.
That cost me about 4 hours to find out. I hope this saves you some time.


@@ -5,133 +5,133 @@ author = "Gibheer"
draft = false
+++
This time, we will build a base kerberos setup. At the end, you will be able to log into another machine using kerberos only.
You need the following things to make kerberos work:
* a working dns server
* 2 servers
I will explain this setup on an openindiana system with 2 zones. `kerberosp1` will be my kerberos machine and `sshp1` will be my ssh server with kerberos support.
setup of kerberos
=================
The setup of kerberos was pretty easy, after reading 3 tutorials about it. The essential part here is to decide how the realm and the admin account should be called.
To start the setup, call `kdcmgr`. At first, it asks for your realm, which you should name like your domain.
After that, you have to generate an admin principal. A principal is like an account for a user or admin, but it's also used for services. I named mine `kerberosp1/admin`. Give it a safe password and you are done.
Now you should have a populated `/etc/krb5/` directory. Open the file `kdc.conf` in that directory and search for `max_life`. It was set to 8 hours for me, which was too long. Adjust the value to 4h or 16h, as you like. I did the same with `max_renewable_life`.
Edit: You should add the following option in the realms section for your realm.
    kpasswd_protocol = SET_CHANGE
Kerberos uses a separate protocol for changing the password of principals. An RPC-like protocol is used in the solaris version, and microsoft has yet another one. So the only option compatible with all of them is `SET_CHANGE`. But to make things worse, the solaris default does not even work in an internal network. So just add this entry and save yourself the stress of finding out why password changes are not working.
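Put together, the relevant realm settings could look like the following sketch - the realm name `PROD.LAN` is borrowed from the error messages further down, and the lifetimes are example values, not taken from this setup:
```
# /etc/krb5/kdc.conf - ticket lifetimes for the realm (example values)
[realms]
        PROD.LAN = {
                max_life = 16h 0m 0s
                max_renewable_life = 7d 0h 0m 0s
        }

# /etc/krb5/krb5.conf - password change protocol, as described above
[realms]
        PROD.LAN = {
                kpasswd_protocol = SET_CHANGE
        }
```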
setting up some accounts
========================
To use the kerberos service, first check whether the kdc is running and start it if it's not. For openindiana, the check is
    svcs krb5kdc
which should return online.
After that, as root, start the kerberos shell with `kadmin.local`. This is a management shell to create, delete and modify principals.
Here we are going to create some policies. With these, we can set some minimal standards, like the minimum password length.
I created three policies: an `admin`, a `user` and a `service` policy. These got the following settings:
* admin
  * minlength 8
  * minclasses 3
* user
  * minlength 8
  * minclasses 2
* service
  * minlength 12
  * minclasses 4
This sets some password limitations for every principal group I have. `minclasses` is the number of different character classes required. There are lower case, upper case, numbers, punctuation and other characters.
To create a new policy, use the command `addpol` or `add_policy` with `-minlength` and `-minclasses`. You can simply type the command to get some help, or read the man page.
After creating the policies, we have to create some principals. First, we should create one for ourselves. You can do this with the command `addprinc` or `add_principal`. Give it a policy with the argument `-policy` and a name. You will have to input a password for this principal according to the policies.
You can use this scheme to create user accounts too. For that, you can generate passwords for them with the program `pwgen`. It's pretty helpful and can generate pretty complex passwords, so that should be best.
Now we need a principal for our ssh server. The name of this principal should be `host/name_of_service.your.domain.name`, so in my case it is `host/sshp1.prod.lan`. But I did not want to type any password, so I added the argument `-randkey`, which generates a password according to the policies we set.
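Taken together, the kadmin.local session for this section could look roughly like this (MIT kadmin syntax; the user principal `gibheer` is only an example, borrowed from the error message further down):
```
# kadmin.local
kadmin.local: add_policy -minlength 8 -minclasses 3 admin
kadmin.local: add_policy -minlength 8 -minclasses 2 user
kadmin.local: add_policy -minlength 12 -minclasses 4 service
kadmin.local: add_principal -policy user gibheer
kadmin.local: add_principal -randkey -policy service host/sshp1.prod.lan
```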
Now we have to export the key of the last principal into a keytab file that can be read by the service which wants to use it. This is done with the command `ktadd`, like this:
    ktadd -k /etc/krb5.keytab host/sshp1.prod.lan
This generates our file in /etc/krb5.keytab. Copy this file into the kerberos directory on the ssh server (on openindiana it's `/etc/krb5/`) and delete the one on the kerberos host. This is important, as another execution of ktadd would append the next key to that file.
setting up ssh
==============
To make ssh work with kerberos, we need `/etc/krb5/krb5.conf` and `/etc/krb5/krb5.keytab`. In the step before, we already moved the `krb5.keytab`. We can copy the `krb5.conf` from the kerberos server to the ssh server.
Now you can start the ssh daemon.
try to log in
=============
For the test, we will try to connect to the ssh host from the kerberos host. So start a shell on the kerberos server and type `kinit`. This should ask for your password. If it was correct, `klist` should show you that you have been granted a ticket.
Now try to open an ssh session to the server, with `-v` set for more information, and it should work.
problems that can occur
=======================
no default realm
----------------
This is the message
    kinit(v5): Configuration file does not specify default realm when parsing name gibheer
which hints that your `/etc/krb5/krb5.conf` is missing.
client/principal not found
--------------------------
The message
    kinit(v5): Client 'foo@PROD.LAN' not found in Kerberos database while getting initial credentials
is a hint that you forgot to add the principal or that your username could not be found. Just add the principal with `kadmin` and it should work.
ssh does not use kerberos
-------------------------
If ssh does not want to use kerberos at all, check the GSSAPI options. These should be enabled by default, but they can be disabled. If that's the case, add the following line to your `sshd_config`.
    GSSAPIAuthentication yes
After a restart, ssh should use kerberos for authentication.
links
=====
* [setup of kerberos on opensolaris](http://www.linuxtopia.org/online_books/opensolaris_2008/SYSADV6/html/setup-148.html)
* [MIT kerberos page](http://web.mit.edu/kerberos/krb5-1.5/krb5-1.5.4/doc/krb5-admin/krb5_002econf.html)
* [KDC Setup on Solaris](http://wiki.creatica.org/cgi-bin/wiki.pl/Kerberos_KDC_server_on_Solaris)
* [Kerberos password](http://fnal.gov/docs/strongauth/princ_pw.html#46115)
* [Kerberos policies](http://pig.made-it.com/kerberos-policy.html)
* [Administrative Guide to Kerberos](http://techpubs.spinlocksolutions.com/dklar/kerberos.html#err_server_not_found)
one last word
=============
I have one last word for you: Kerberos does not do authorization!
That means that kerberos can not say whether a principal is allowed to use a service or not. It just manages the authentication for you.
If you want to manage access, there are some possibilities for that. One is to use ldap, often used in conjunction with kerberos. Or you manage the `passwd` files or any other file yourself, or you use a service like [chef](http://wiki.opscode.com/display/chef/Home) or [puppet](http://puppetlabs.com/).
changelog
=========
* added some explanation to `kpasswd_protocol`


@@ -5,13 +5,13 @@ author = "Gibheer"
draft = false
+++
There is a bug in openindiana that does not let you fetch the content of a page with curl when it's secured with ssl. The cause is an option set at compile time: the path to the certificate store.
In the case of openindiana this is set to `/etc/curl/curlCA`, but all certificates reside in `/etc/certs/CA/`. This leads to the following error message when you try it:
    curl: (77) error setting certificate verify locations
To fix this, run the following command.
    mkdir /etc/curl && cat /etc/certs/CA/*.pem > /etc/curl/curlCA
This writes all certificates of the default CA store into the file curl is looking for, and after that, it works.
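A quick way to verify the fix is to fetch any https url again, for example (the host is just a placeholder):
```
curl -I https://example.org
```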


@@ -5,95 +5,95 @@ author = "Gibheer"
draft = true
+++
Hey there! This time, we will get rubinius running on openindiana. As there is no package for llvm yet, it gets compiled within the build.
I got this far because of crsd. He told me how to get llvm running, so that we could get rubinius to compile.
After that, [dbussink](https://twitter.com/#!/dbussink) got rbx to compile within two days! He found some really strange things, but in the end, rubinius can run on a solaris platform!
requirements
============
But first, you have to fulfill some requirements. You have to add the sfe publisher to get gcc4.
You can do that with the command
    pkg set-publisher -O http://pkg.openindiana.org/sfe sfe
After that, install the following packages:
* developer/gcc-3
* system/header
* system/library/math/header-math
* gnu-tar
* gnu-make
* gnu-binutils
* gnu-coreutils
* gnu-findutils
* gnu-diffutils
* gnu-grep
* gnu-patch
* gnu-sed
* gawk
* gnu-m4
* bison
* git
Yeah, that's a lot of gnu, but we need it to get everything going. The cause of this are the old versions of solaris software, which do not support many features. The default compiler is even gcc 3.4.3!
After you have installed these packages, install the following package from sfe:
* runtime/gcc
The order is important, as gcc3 and gcc4 set symlinks in /usr/bin. If you install them in another order, the symlink is not correct and you end up having a lot of work.
some patching
=============
After that, we have to fix a small bug in gcc by editing the file `/usr/include/spawn.h`.
    73,76d72
    < #ifdef __cplusplus
    < char *const *_RESTRICT_KYWD argv,
    < char *const *_RESTRICT_KYWD envp);
    < #else
    79d74
    < #endif
    86,89d80
    < #ifdef __cplusplus
    < char *const *_RESTRICT_KYWD argv,
    < char *const *_RESTRICT_KYWD envp);
    < #else
    92d82
    < #endif
This fixes a bug in gcc with [the \_\_restrict key word](http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49347).
fix the path
============
Now that we have installed and fixed a bunch of things, we need to put the gnu path in front of our own. Use the following command to get this done:
    export PATH="/usr/gnu/bin:$PATH"
Yes, it needs to be in first place, or else one of the old solaris binaries gets chosen, and then nothing works and you get weird errors.
getting rbx to compile
======================
with an own build
-----------------
If you want to build rbx yourself, get the code from [https://github.com/rubinius/rubinius.git](https://github.com/rubinius/rubinius.git). After that, configure and rake, and everything should be fine.
with rvm
---------
If you want to get it working with rvm, install rvm as normal. After that you can simply install rbx with
    rvm install rbx
That's all you need.
conclusion
==========
After dbussink fixed all the errors, rbx compiles fine when the toolchain is there. Getting to this point was not easy, but we did it. So have a lot of fun hacking on and using rubinius!

View File

@ -5,18 +5,18 @@ author = "Gibheer"
draft = false
+++
If you encounter the following error with `make install`

    find: cycle detected for /lib/secure/32/
    find: cycle detected for /lib/crypto/32/
    find: cycle detected for /lib/32/
    find: cycle detected for /usr/lib/elfedit/32/
    find: cycle detected for /usr/lib/secure/32/
    find: cycle detected for /usr/lib/link_audit/32/
    find: cycle detected for /usr/lib/lwp/32/
    find: cycle detected for /usr/lib/locale/en_US.UTF-8/32/
    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LO_LTYPE/32/
    find: cycle detected for /usr/lib/locale/en_US.UTF-8/LC_CTYPE/32/
    find: cycle detected for /usr/lib/32/

use `ginstall` in your Makefile instead of `install`. The latter just seems broken on solaris.
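A minimal sketch of how that could look in a Makefile - the target, file name and paths are made up, only the `ginstall` substitution matters (recipe lines must be indented with a real tab):

    # use GNU install on Solaris instead of the broken /usr/bin/install
    INSTALL = ginstall

    install:
    	$(INSTALL) -m 0444 index.html $(DESTDIR)/var/www/index.html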

View File

@ -9,7 +9,7 @@ draft = true
so my last system lasted over two years. You may (still/not)
remember it:\
[Lustige Gehversuche mit ...](/post/25.md)
Now the (un)fortunate circumstances of a dying monitor cable brought
me to swap my beloved Hermelin for the Grinsekatze

View File

@ -5,21 +5,21 @@ author = "Gibheer"
draft = false
+++
Two weeks ago I had a problem with installing rubygems on my laptop. Yesterday another person hit the same problem, so I will document here what is wrong.
The problem manifests itself in that gems get installed with the error message

    WARNING: You don't have /home/steven/.gem/rbx/1.8/bin in your PATH,
    gem executables will not run.

If you then want to use the binary provided with the gem, it will not work, and it happens with all ruby versions, be it rubinius, jruby or 1.9. What makes it worse is the fact that until now it only occurs on archlinux installations. And it is not a problem of rvm!
So if you are on archlinux, look into `/etc/gemrc`. There will be a line saying

    gemrc: --user-install

To solve the problem, create a file `~/.gemrc` and put the line

    gemrc:

in it. By doing that, the file `/etc/gemrc` will be ignored. And if you are manipulating that file anyway, take a look at [all the other options](http://docs.rubygems.org/read/chapter/11) you can set.

View File

@ -5,87 +5,87 @@ author = "Gibheer"
draft = false
+++
After my openindiana server has now been running for 4 months straight, I thought I'd write a bit about the ecosystem of Illumos and its state.

Illumos ecosystem
=================

Illumos is the base system which every distribution uses. It's more or less
the base system, like FreeBSD. With Solaris 11 being the original OpenSolaris,
Illumos is a fork of what was open source of OpenSolaris in 2010.
The development on Illumos is pretty active and at the moment there is no merge with the Solaris code base planned. Oracle distributed code after the Solaris 11 release, but it was mostly code which had to be distributed either way. So there were no updates on kernel or ZFS code.
This has a huge impact on the future development of Illumos, as everything has to be developed by contributors like Joyent, Nexenta and others. But it also has implications for most of the core features of Solaris, most importantly ZFS. These are already noticeable with Solaris 11 having ZFS version 31 and FreeBSD and Illumos having version 28. This means that neither FreeBSD nor Illumos can do anything with a zpool created on Solaris 11, which already makes a switch from one system to another difficult.
Nevertheless the contributors to Illumos work to make it better. The largest part at the moment is to get Illumos compiling with GCC 4.6.1. At first look it seems like a minor problem, but OpenSolaris was not written to be built with GCC but with the proprietary SunStudio. As far as I could see, this has some major implications and opened huge holes in the code, which have to get fixed.
With that, the base system is also being upgraded from older versions of Perl and Python, which will also be a longer process.
Another huge part is the process of building packages. Solaris 10 and older used the SVR4 format, which was pretty simple and looked like rpm. OpenSolaris introduced a new format named IPS - Image Packaging System - which is also compatible with the SVR4 format. OpenSolaris had a pretty big infrastructure for building IPS packages, but it was lost when oracle acquired sun and shut it down.
The problem now is how to build new packages. Some are using SVR4 to build the IPS packages, which works well, and the repository already has a bunch of newer releases of many projects.
Another attempt was to use pkgsrc, a project of NetBSD which already supports Solaris. This attempt died pretty fast: the packages were used neither like FreeBSD ports nor for compiling the packages.
The third approach is to build a packaging system on top of dpkg/apt. It is a collaboration between Nexenta, OpenIndiana and others. There is also a plan to build a new distribution out of it - named illumian.
One major difference between Solaris 11 and Illumos is that Illumos has KVM. It got ported from Linux by Joyent and works pretty well. With this step, Illumos not only had zones for virtualization but also full virtualization to get Linux running.

distribution ecosystem
======================

There are a bunch of distributions out there, trying to solve different problems.

[Solaris 11 - the first cloud os][solaris11]
----------

Not so much a distribution of Illumos, but of the old OpenSolaris. Solaris 11 is a pretty good all-round distribution. It is used from small systems to huge ones, running one application or some hundred on one machine. Some use it for storage and others virtualize the hell out of it with zones and crossbow.

[OpenIndiana - open source and enterprise][openindiana]
-----------

OpenIndiana was one of the first distributions using the Illumos core. It is available as a server distribution and a desktop one. The server one is targeted at the same usage as Solaris 11. As OpenIndiana uses Illumos, it also has support for KVM and can therefore be used as a platform to host many fully virtualized instances on top of a ZFS and crossbow infrastructure.
A problem at the moment is the pretty old software it offers. Most of the packages are from OpenSolaris and therefore nearly 2 years old. Most of them don't even get security patches. The reason for that is the packaging topic mentioned above. As long as they don't have a strategy, nothing will change here. The only option at the moment is to use the sfe repo.
This may change in the future because of the joint packaging effort with Nexenta.
OpenIndiana also has a desktop part which is targeted at ubuntu users wanting ZFS and time machine. As I already used OpenSolaris on a laptop, I can only say "Yes, it works". But you have to decide yourself if you can live with pretty old but stable software. Many projects are not even available in package form, so you would have to compile them yourself.

[Nexenta - enterprise storage for everyone][nexenta]
-------

Nexenta is another distribution which switched to the Illumos core pretty fast. It is intended to be used for storage systems, but can also be used for other kinds of servers. It uses the debian package system and a gnu userland. It is available as a community edition and an "enterprise" edition.
The packages are a bit more up to date than the OpenIndiana ones. With the combined effort of both projects, they may keep closer to the actual releases.

[illumian - illumos + debian package management][illumian]
--------

Illumian is a new project and a collaboration between Nexenta and OpenIndiana. It will provide packages through the debian package management dpkg/apt. The target audience seems to be the same as OpenIndiana's. The plan at the moment is to release all packages in the same version as in OpenIndiana, so that the ultimate choice will just be whether you want to use dpkg or IPS.

[SmartOS - the complete modern operating system][smartos]
-------

This is not so much a distribution as a live image. Its purpose is to use all disks in the server to create a zpool and use that to provide storage for virtual machines, be it zones or KVM instances. The KVM instances are also put into zones to attach dtrace to the virtual instances and see what's going on inside.
SmartOS also offers pretty nice wrappers around the VM operations to get new instances up fast.
The company behind SmartOS is Joyent, better known for building node.js. They use SmartOS as the central pillar of their own JoyentCloud, where they host node.js applications, databases and also Linux machines.

[omnios][omnios]
------

OmniOS is a very new distribution, from OmniTI. It does not offer much at the moment apart from an ISO image and a small wiki.
It is intended to be used much like FreeBSD. They provide a very stripped down Illumos core with packages updated as far as possible and nothing more. Every other package one might need has to be built and distributed through a package repository. The reason behind this is that they only want to provide the basic image which everybody needs, but not the packages needed only by themselves. And even these packages may be one or two versions behind.
And let me tell you - the packages they already updated may be considered bleeding edge by many debian stable users.

What next?
==========

This was the excursion into the world of Illumos based distributions. I myself will switch away from OpenIndiana. It's great that Illumos is more alive than it was 4 months ago, but there is much work left to do. SmartOS had a huge impact for me and others, and Joyent and Nexenta do great work on improving the ecosystem.
But it will be hard to get back to where OpenSolaris was. Too much time went by unused. But I'm looking forward to what else might come out of Illumos land.

[solaris11]: http://www.oracle.com/us/products/servers-storage/solaris/solaris11/overview/index.html "Solaris 11"
[illumos]: http://illumos.org/ "the illumos project"
[openindiana]: http://openindiana.org/ "OpenIndiana"
[smartos]: http://smartos.org/ "SmartOS - the complete modern operating system"
[illumian]: http://illumian.org/ "illumian"
[nexenta]: http://nexentastor.org/ "Nexenta - the storage platform"
[omnios]: http://omnios.omniti.com "OmniOS from OmniTI"

View File

@ -5,18 +5,18 @@ author = "Gibheer"
draft = false
+++
Okay, let's say you are a proud owner of a system and use ZFS. Now let's assume that you lost a disk from your storage and want a fast backup of your data without the hassle of packing up everything, checking for permissions and so on. If the target system has ZFS too, then this will be fun for you, because I will show you how to make a backup of a ZFS partition and all its descendants in a few small steps.
First, you have to create a recursive snapshot for the backup. This can be done with

    zfs snapshot -r tank/testpartition@backup-today

After that the real magic happens. We send this snapshot over ssh and import it on the other side.

    zfs send -R tank/testpartition@backup-today | ssh target.machine "zfs recv -u tank/backup-machine"

Now all partitions from `tank/testpartition` will be put into `tank/backup-machine` and everything will be preserved. Links will be links, permissions will be the same. The flag `-u` is there to prevent mounting the partitions on the target machine, or else all partitions would be mounted as they were before.
As this sends the complete dataset over the wire, it is not that usable for daily backups. For this use case, use incremental sends (with the option `-i`). On the receiving side, nothing changes.
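A sketch of such an incremental run, assuming the snapshot name from above and a second snapshot taken a day later (snapshot names are of course your choice):

    zfs snapshot -r tank/testpartition@backup-tomorrow
    zfs send -R -i tank/testpartition@backup-today tank/testpartition@backup-tomorrow | \
      ssh target.machine "zfs recv -u tank/backup-machine"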
Thanks at this point to [shl](http://blogs.interdose.com/sebastian/) for showing me ZFS.

View File

@ -5,30 +5,30 @@ author = "Stormwind"
draft = false
+++
Hello you all,
==========

I actually managed to completely forget to report on the wool festival in Nierstein last year as well. So I just decided on short notice to adjust the numbering a bit.
This year I am quite early with the wool festivals. (Not to be confused, by the way, with the evil wool fasting.)
The day before yesterday and yesterday the [2nd Backnang wool festival](http://www.backnanger-wollfest.de/) took place in Backnang, and I was there on the Saturday.

<div style="text-align:center">
<img src="/images/wollfest2012-1.jpg" alt="wool loot, part 1" /><br /><br />
</div>

So I was able to top up my already existing wool stash with even more wool, as you can easily see in the pictures.
From the house of Zitron there is now a great 4-ply sock yarn consisting of two plain white threads and two pre-dyed black ones, which means there is now also beautifully dark sock wool from the Wolldrachen that does not have to be partly solid black. Of course I had to grab two skeins of it again.
Also very pretty: the two combed tops of 100% tussah silk, one in green and one in orange. (I am melting away, it is so wonderfully soft.) I already have an idea of what they are to become; now they only need to be spun and plied. But I am still missing the thread I want to ply with. It is already ordered, it just has to arrive. So stay tuned - I am curious too whether it turns out the way I want in the end.
I also brought home two more combed tops, but you cannot see the second one in the photo, as 50% of it has already fallen victim to my spinning wheel.

<div style="float:right">
<img src="/images/wollfest2012-2.jpg" alt="wool loot, part 2" />
</div>

I also made my tomcat happy, because he now has a new - albeit dead - friend: the sheepskin you can see in the background of the pictures. Although he still has to get used to it a bit. I think he had never seen a fleece before and found it creepy at first, until he finally put a foot, or rather a paw, on it.
See you,
Stormwind

View File

@ -5,10 +5,10 @@ author = "Gibheer"
draft = false
+++
If you have synaptics and your application just keeps scrolling even after you stopped, then put the following into your xorg.conf and it should stop that. (The `Identifier` and `MatchIsTouchpad` lines are needed for a valid InputClass section; the identifier name is your choice.)

    Section "InputClass"
        Identifier "touchpad no coasting"
        MatchIsTouchpad "on"
        Option "CoastingSpeed" "0"
    EndSection

That should help.

View File

@ -5,8 +5,8 @@ author = "Gibheer"
draft = false
+++
Whenever you have the need to "patch" the path in a Makefile, you can do that with the following line.

    PATH := $(PATH):/optional/path

Use $(PATH) to get the PATH variable from the environment. `:=` is used to avoid a circular dependency, in the form of PATH getting reassigned over and over again. If you leave out the colon, make will inform you about this.
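A minimal sketch of both variants; the plain `=` form is the one make aborts on with a complaint that the recursive variable references itself:

    # fine: $(PATH) is expanded once, right here
    PATH := $(PATH):/optional/path

    # error: recursive variable 'PATH' references itself
    # PATH = $(PATH):/optional/path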

View File

@ -5,12 +5,12 @@ author = "Gibheer"
draft = false
+++
If you ever have the need to generate an encrypted password to put into scripts or anything else, then you can use the following SQL command to generate it:

    select 'md5'||md5('password'||'rolename');

or in the shell

    echo -n 'passwordrolename' | md5sum | awk '{ print "md5" $1 }'

The important part is that the role name is appended to the password and this construct is then put through md5. The relevant piece in the source is [pg_md5_encrypt()](http://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/backend/libpq/md5.c).
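One place such a hash can be used is an ALTER ROLE statement, so the clear-text password never shows up in your scripts; a sketch with a made-up role name and hash:

    -- the 'md5...' string is stored verbatim as the password hash
    ALTER ROLE rolename PASSWORD 'md5dc647eb65e6711e155375218212b3964';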

View File

@ -5,32 +5,32 @@ author = "Gibheer"
draft = true
+++
One of the nicer things on Solaris is SMF, the service management facility. It is a replacement for the SysV init system. Its major features were parallel starting of services, built-in service dependencies and pretty good reporting functionality. It could report exactly which service failed and which other services failed because of it, so the dependencies worked in both the starting and the stopping direction.
One of the worst parts of it was the horrible XML file you had to write for a service to work. I never found them very intuitive, and they were too complex to "just" start and stop a service.
It was a pretty good experience, and I found it confusing on linux that no init system was the same. On gentoo all init scripts have a status, on debian only some do. One has to take care of logging oneself, and dependencies were managed either through a start order (on archlinux in /etc/rc.conf), by alphabetical order and runlevels (on gentoo), or built into the init scripts as comments (the debian way).
Then came ubuntu and built their own init system named upstart. It sits a bit between the old SysV init system and something new, as it uses init-like script files but extends them with further information, like when to start a service. But most of the file is still script code and gets executed by a shell.
One of the largest changes for old users is that this system can't use `/etc/init.d/scriptname start` anymore. Instead you use `start service` or `stop service`. I think this is a pretty good API and much better for new users to learn.
It can react on events to start and stop services and can also try to restart failed services. Services can also communicate with upstart over dbus.
Systemd on the other hand is very different from SysV. At first I was a bit skeptical because of all the negativity. But now I run 3 systems with systemd and till now it feels good. These three systems are all archlinux boxes and there the migration is very easy. You can take a look at the [wiki page][archlinux-wiki-systemd]. You can run systemd parallel to SysV init and start it with a parameter in grub. If you feel confident enough, you can uninstall the old system and use only systemd in the future.
So what exactly does systemd have that SysV is missing?
It uses udev (or udev systemd?) pretty intensively, so that it can show you information about your hardware.
Another thing is that it can start the daemons in parallel. This alone cut my boot time in half. But that is not that important to me, it was just a wow effect.
Next is the management of the services themselves. The command `systemctl` is the API to everything in systemd, so not as nice as upstart. But with just `systemctl` you already get an overview of which services are currently loaded, running and which failed (here is a [gist of what it looks like][github-systemctl-output]). You can start and stop services with `systemctl start` or `systemctl stop`, but this will not make it permanent. If you come from solaris and SMF you will find it confusing, as there an _enable_ or _disable_ will activate and start the service, or disable and stop it. But I found it pretty helpful when I'm switching from one thing to another and just don't want to stop the service yet.
Think about switching the login manager. You may have installed the new one, enabled it to start at boot time and disabled the old one. Now you can still do other things and wait with the restart, or you switch to the terminal, stop the old one and start the new one. I waited, did some configuration and afterwards restarted the whole machine. It did not take me any longer than switching to the console.
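A sketch of that workflow, with made-up display manager unit names:

    systemctl enable newdm.service     # comes up at the next boot
    systemctl disable olddm.service    # stays away at the next boot
    # nothing changed in the running system yet - switch whenever it fits:
    systemctl stop olddm.service
    systemctl start newdm.service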
Another feature is that systemd has an included logger. For some this may not be a feature, as they already have a configured syslog-ng - don't worry, you can still use it. The program is called journalctl and is bundled with systemd. Systemd starts every daemon within the logger context, so that programs don't have to worry about logging anymore but can just write to STDOUT and systemd takes care of the rest. Yes, it sounds crude, but with it comes a pretty neat feature: if you now call `systemctl status service` you get an overview of the status of the service including the last couple of log entries! As systemd builds automatic cgroups, it can also show you the daemon dependencies. You can see how it looks for [net-auto-wireless][github-systemctl-status] on my laptop.
And I would say that this is by far the best thing systemd could give me. If a service dies, I need to know all these things, and this is the first system which can show me. Not even SMF could do that. It would just tell me that my service died and that others died with it, but it could not tell me exactly why or show me the logs.
Yes, my machines all boot faster and I profit from it. I shut down my pcs now. But that is nothing in comparison to the context I get when looking at a service. I can only recommend everyone to try systemd and see for yourself if it may help you get your work done faster.
At the moment fedora, Suse and Mandriva seem to be the only ones using systemd by default. There are other distributions that have packages for it. From debian I know that you [can't replace][debian-systemd] SysV as easily as on archlinux, but it seems to work. There are already many service files out there for various daemons, so it seems to be moving pretty fast. It will be interesting to see how fast it gets adopted by the various distributions.

[archlinux-wiki-systemd]: https://wiki.archlinux.org/index.php/Systemd
[github-systemctl-output]: https://gist.github.com/3180643#file_gistfile1.txt
[github-systemctl-status]: https://gist.github.com/3180643#file_systemctl_status_net_auto_wireless
[debian-systemd]: http://wiki.debian.org/systemd#Issue_.231:_sysvinit_vs._systemd-sysv

View File

@ -5,20 +5,20 @@ author = "Gibheer"
draft = false
+++
To rotate logs on a Solaris system, you have to configure logadm to do it.
This is a small example of how that could look for lighttpd.
Execute the two following statements to create two log entries:

    logadm -w /var/lighttpd/1.4/logs/access.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'
    logadm -w /var/lighttpd/1.4/logs/error.log -p 1d -C 8 -a 'pkill -HUP lighttpd; true'

After that, there should be two new entries in `/etc/logadm.conf` with all the parameters you gave to logadm. The parameters mean that the logs will be rotated once a day and 8 old logfiles will be kept. With every rotation lighttpd will be reloaded to use the new empty log file. For more parameters read the man page of logadm; there are also some nice examples at the bottom.
To try it out and see if it runs, create enough log entries and then call logadm with the logfile. In this case that would be

    logadm /var/lighttpd/1.4/logs/access.log
    logadm /var/lighttpd/1.4/logs/error.log

After that, it should have created new log files and reloaded lighttpd.

View File

@ -5,14 +5,14 @@ author = "Gibheer"
draft = false
+++
After I switched everywhere to a tiling wm, I wondered how everybody else locks their screen. Sure, you can lock the screen with a keybinding, but what about when you leave the pc to talk to someone and then forget about it?
The tool I found is [xautolock][xautolock] and it works pretty well. After a configurable time span it starts the locker, and after another time span it can also start suspend, hibernate or whatever. I use it with the following settings:

    xautolock -locker slock -time 2 -killer "systemctl suspend" -killtime 10 &

This starts slock, the simple locker, after two minutes and sends the pc into suspend after 10 minutes of inactivity. As it runs in the background, you can start it either through .xinitrc or with your wm of choice.
To lock the screen by command, bind `xautolock -locknow` to your keys and it calls the daemon, which then calls the locker.
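For example, assuming you use i3, such a binding could look like this in the i3 config (the key combination is of course your choice):

    # lock the screen immediately via the running xautolock daemon
    bindsym Mod4+l exec xautolock -locknow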
[xautolock]: http://freecode.com/projects/xautolock

View File

@ -5,90 +5,90 @@ author = "Gibheer"
draft = false
+++
Just out of curiosity I tried to build a service for PostgreSQL and the systemd init system. Before that, I only read the service files of postgres and dhcp delivered with Archlinux. What I wanted to build is a service file able to start multiple instances of postgres with separate configuration files. Just out of curiosity I tried to build a service for PostgreSQL and the systemd init system. Before that, I only read the service files of postgres and dhcp delivered with Archlinux. What I wanted to build is a service file able to start multiple instances of postgres with separate configuration files.
This was much easier than I thought it would be. This was much easier than I thought it would be.
Systemd supports that pretty well and the only thing to do, is add an '@' to the service file name. Everything after '@' is then put into a variable `%I`, which can be used in the service file. So my service file was named 'pg@.service' and I put it into `/etc/systemd/system`. Another possible location is `/usr/lib/systemd/system/`. Systemd supports that pretty well and the only thing to do, is add an '@' to the service file name. Everything after '@' is then put into a variable `%I`, which can be used in the service file. So my service file was named 'pg@.service' and I put it into `/etc/systemd/system`. Another possible location is `/usr/lib/systemd/system/`.
The service file looks like an `.ini` file. It has the three sections Unit, Service and Install. The section Install in which target the service is installed. Targets are like run levels in other init systems. The postgres service gets installed into the multi-user target, which is started after the network: The service file looks like an `.ini` file. It has the three sections Unit, Service and Install. The section Install in which target the service is installed. Targets are like run levels in other init systems. The postgres service gets installed into the multi-user target, which is started after the network:
[Install] [Install]
WantedBy=multi-user.target WantedBy=multi-user.target
The next part is Unit. This section describes the service with a short description and a description of the dependencies. Postgres just needs the network up, so this section looks like this: The next part is Unit. This section describes the service with a short description and a description of the dependencies. Postgres just needs the network up, so this section looks like this:
[Unit] [Unit]
Description=run PostgreSQL instance %I Description=run PostgreSQL instance %I
After=network.target After=network.target
There you can also see the %I, which is replaced with the part after '@' from the name in systemd. There you can also see the %I, which is replaced with the part after '@' from the name in systemd.
The next section is a bit larger and describes everything needed to manage the service itself, like start, stop and reload. The next section is a bit larger and describes everything needed to manage the service itself, like start, stop and reload.
[Service] [Service]
User=postgres User=postgres
Group=postgres Group=postgres
TimeoutSec=120 TimeoutSec=120
Type=forking Type=forking
EnvironmentFile=/etc/conf.d/pg.%I EnvironmentFile=/etc/conf.d/pg.%I
SyslogIdentifier=postgres-%i SyslogIdentifier=postgres-%i
ExecStartPre=/usr/bin/postgresql-check-db-dir ${pgdata} ExecStartPre=/usr/bin/postgresql-check-db-dir ${pgdata}
ExecStart= /usr/bin/pg_ctl -s -D ${pgdata} start -w -t 120 ExecStart= /usr/bin/pg_ctl -s -D ${pgdata} start -w -t 120
ExecReload=/usr/bin/pg_ctl -s -D ${pgdata} reload ExecReload=/usr/bin/pg_ctl -s -D ${pgdata} reload
ExecStop= /usr/bin/pg_ctl -s -D ${pgdata} stop -m fast ExecStop= /usr/bin/pg_ctl -s -D ${pgdata} stop -m fast
OOMScoreAdjust=-200 OOMScoreAdjust=-200
Okay, this is a bit longer than the other parts. The first Couple of options handle the user to start with and he startup timeout. The timeout can't be replaced with a variable because all options from the config will be loaded as environment variables on execution. The Type option is very important, because it can't be set to anything else as forking for postgres, because it will fork to the background. So if you start it as a simple service systemd would loose the handler to postgres and stop it immediately. Okay, this is a bit longer than the other parts. The first Couple of options handle the user to start with and he startup timeout. The timeout can't be replaced with a variable because all options from the config will be loaded as environment variables on execution. The Type option is very important, because it can't be set to anything else as forking for postgres, because it will fork to the background. So if you start it as a simple service systemd would loose the handler to postgres and stop it immediately.
The next options are EnvironmentFile and SyslogIdentifier. The first is for a small config file in `/etc/conf.d/pg.instance` where you replace instance with the instance name. As you can see with the %I in place, it will fill up the full name with the instance identifier. So you can use different config files for different instances. The same happens to the SyslogIdentifier. I thought it would be awesome if the log can be showed per instance and this is what you need to make it happen. The next options are EnvironmentFile and SyslogIdentifier. The first is for a small config file in `/etc/conf.d/pg.instance` where you replace instance with the instance name. As you can see with the %I in place, it will fill up the full name with the instance identifier. So you can use different config files for different instances. The same happens to the SyslogIdentifier. I thought it would be awesome if the log can be showed per instance and this is what you need to make it happen.
The option OOMScoreAdjust is just an option for the OOMKiller, that it should leave postgres alone as much as possible. The option OOMScoreAdjust is just an option for the OOMKiller, that it should leave postgres alone as much as possible.
The option ExecStartPre calls a script which is delivered with postgres on Archlinux and does a check for the data dir. If it does not exist, it will log a line on how to create it. Pretty neat. ExecStart, ExecStop and ExecReload describe the actions to be done, when the service should be started, stopped or reloaded. As you can see, the script uses `${pgdata}` to determine where to look and that variable comes from the EnvironmentFile, which looks for my first instance like this The option ExecStartPre calls a script which is delivered with postgres on Archlinux and does a check for the data dir. If it does not exist, it will log a line on how to create it. Pretty neat. ExecStart, ExecStop and ExecReload describe the actions to be done, when the service should be started, stopped or reloaded. As you can see, the script uses `${pgdata}` to determine where to look and that variable comes from the EnvironmentFile, which looks for my first instance like this
pgdata=/tmp/ins1 pgdata=/tmp/ins1
The file is saved as `/etc/conf.d/pg.ins1` and is really nothing more than this. The rest can handle postgres itself. The file is saved as `/etc/conf.d/pg.ins1` and is really nothing more than this. The rest can handle postgres itself.
Now how do we get the service file into systemd? You do a

    systemctl --system daemon-reload

and then

    systemctl start pg@ins1.service

This creates your first service and tries to start it. You will get an error message like the following

    Job for pg@ins1.service failed. See 'systemctl status pg@ins1.service' and 'journalctl' for details.
If you run the status command, you will see that it failed, how it failed and the log message from the check script. After that, you can create the instance, start it anew - and there it is.
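The creation step itself is not shown here; with the `pgdata=/tmp/ins1` from above, it would presumably be something along these lines (initdb run as the user configured in the unit):

    sudo -u postgres initdb -D /tmp/ins1
    systemctl start pg@ins1.service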
    # systemctl status pg@ins1.service
    pg@ins1.service - PostgreSQL database server
          Loaded: loaded (/etc/systemd/system/pg@ins1.service; disabled)
          Active: active (running) since Tue, 25 Sep 2012 09:27:54 +0200; 3 days ago
         Process: 372 ExecStop=/usr/bin/pg_ctl -s -D ${PGROOT}/data stop -m fast (code=exited, status=0/SUCCESS)
         Process: 624 ExecStart=/usr/bin/pg_ctl -s -D ${PGROOT}/data start -w -t 120 (code=exited, status=0/SUCCESS)
         Process: 619 ExecStartPre=/usr/bin/postgresql-check-db-dir ${PGROOT}/data (code=exited, status=0/SUCCESS)
        Main PID: 627 (postgres)
          CGroup: name=systemd:/system/postgresql.service
                  ├ 627 /usr/bin/postgres -D /var/lib/postgres/data
                  ├ 629 postgres: checkpointer process
                  ├ 630 postgres: writer process
                  ├ 631 postgres: wal writer process
                  ├ 632 postgres: autovacuum launcher process
                  └ 633 postgres: stats collector process
Now if you want to see some logging, you can ask `journalctl` and give it the identifier string.

    journalctl SYSLOG_IDENTIFIER=postgres-instance1
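With a current systemd, filtering by unit name should work just as well - an alternative sketch:

    journalctl -u pg@ins1.service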
That's all there is to multi instance services on systemd. Figuring everything out actually took very little time, as the documentation is pretty good. Just a hint: don't search the web for documentation, look in the man pages. The best starting point is `man systemd`, and then take a look at the _SEE ALSO_ section.
Have fun!

View File

@ -5,49 +5,49 @@ author = "Gibheer"
draft = false
+++
Today we release a small project of mine - [zero][zero-github] 0.1.0.
It is aimed at being a toolkit for building web services. It is built around the idea of abstracting away the tedious work and making it clearer to work with the information. With that in mind, some modules are already included. These are the following.
Request
-------
This class provides an interface to information regarding the request and makes it available, grouped together, to other parts.
One example is the grouping of parameters in one attribute `request.params`, which makes *get* and *post* parameters available separately. As many developers also regard URLs as some kind of parameters, it is possible to define custom key/value pairs.
Another example is the grouping of all accept headers under `#accept`, which makes accessing type information nice to read: `request.accept.types`.
Response
--------
The response class helps in building responses in an easy way. It provides an interface similar to `Rack::Response`. It also already does a small check for status code correctness and will probably get more helpers in the future to set common headers and status codes.
Router
------
Zero has a small router which takes routes and pushes the request to other applications. A similar implementation is Rack::URLMap, but this router is also able to extract variables from the URL and puts them into the Request.
Renderer
--------
The renderer is a facility to render templates according to the accept type wanted by the client. That is possible by providing a map of short type names to the actual mimetypes, which is then used internally to search for the right template. With this, developers only have to provide the template and the mapping, and the renderer takes care of actually using it.
The router does not take care of the actual method of the request. This can be done before the router with a small application defining routers for every possible method, or by working with the method in the applications called by the router.
Controller
----------
This component is still a "Work in Progress" but can already be used to glue all these parts together. It splits the work into two steps - processing the request and rendering the resulting data. If that workflow does not fit, it is also possible to extend or modify the controller to accommodate other workflows.
Status and Future of the toolset
--------------------------------
The idea is to make web service development easier than before, and this is only the beginning of zero. The plan is to extend the toolkit with more modules and, where possible, to make all modules available stand-alone. That way, zero can be used in other projects to replace parts or to make development easier. It is a Work in Progress at the moment, but the results so far are very promising.
We also have a [repository for example applications][zero-examples] and we will extend it over time to show as many aspects of the toolkit as possible.
If you are interested, check out [zero on github][zero-github] or have a look at the [examples][zero-examples].
[zero-github]: https://github.com/Gibheer/zero
[zero-examples]: https://github.com/Gibheer/zero-examples

View File

@ -5,60 +5,60 @@ author = "Gibheer"
draft = false
+++
Today someone told me about natural and inner joins. As I have been using SQL for many years already, I was a bit puzzled at first. I had heard of the terms, but thought until now that they meant the same thing.
The first thing I did was look in the [PostgreSQL documentation][pg-doc] and yes, they are not the same. But they are also the same.
The inner join is the default for doing joins. It just joins two tables using the on clause.
    # select *
      from tableA A
      join tableB B
        on A.id = B.id;

| a.id | b.id |
|------|------|
| 3    | 3    |
Now an interesting thing is that the on clause can be replaced by a using clause when both tables provide equally named columns. This not only makes the select a bit shorter, but also reduces the number of columns in the result. All columns listed in the using clause are left out of the result and replaced with a single column with the name used in the using clause. The select from above would then look like this
    # select *
      from tableA A
      join tableB B
      using (id);

| id |
|----|
| 3  |
The natural join goes one step further: it searches for common columns on its own and generates the using clause itself. The resulting query then looks like this
    # select *
      from tableA A
      natural join tableB B;

| id |
|----|
| 3  |
As nice as this seems, it can backfire pretty fast when one has two tables with columns of the same name but completely different content, not meant to be joined. Then it is possible to get nothing at all.
    # select * from foo limit 1;
     id | test | bar
    ----+------+-----
      1 |    1 |   3

    # select * from baz limit 1;
     id | test | bar
    ----+------+-----
      1 |    1 |  20

    # select * from foo natural join baz
     id | test | bar
    ----+------+-----
As all columns are named the same, but the content of column bar differs, no common row is found and therefore nothing is returned.
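A way around this - not shown in the original post - is to fall back to an explicit using clause that names only the columns actually meant to be joined:

    # select * from foo join baz using (id, test);
     id | test | bar | bar
    ----+------+-----+-----
      1 |    1 |   3 |  20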
For further information, the [PostgreSQL documentation][pg-doc] is pretty good.
[pg-doc]: http://www.postgresql.org/docs/current/static/queries-table-expressions.html

View File

@ -5,22 +5,22 @@ author = "Gibheer"
draft = false
+++
This is some kind of hint for others who may run into the same problems I had.
I wanted to compile [llvm](http://llvm.org/) 3.1 on omnios, an illumos distribution, but it did not work out like I wanted it to. One of the first errors I got was a linking error.
    Text relocation remains                        referenced
        against symbol                  offset     in file
    llvm::LoopBase<llvm::MachineBasicBlock, llvm::MachineLoop>::getLoopPredecessor() const 0x149a /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
    llvm::LoopBase<llvm::MachineBasicBlock, llvm::MachineLoop>::getExitBlocks(llvm::SmallVectorImpl<llvm::MachineBasicBlock*>&) const 0x6200 /tmp/build_gibheer/llvm-3.1.src/Release/lib/libLLVMCodeGen.a(MachineLICM.o)
    ld: fatal: relocations remain against allocatable but non-writable sections
The problem in this case is that parts of the llvm code are not compiled position independent (PIC). As I learned, this can be worked around with the following setting.

    LDFLAGS="-mimpure-text -Wl,-ztextwarn"
This changes the linking to only warn about the text relocations, but still link everything together. It is not a nice solution, but with it, it is possible to find out where things go wrong.
With that problem partially solved, I had another problem. Solaris supports 32bit and 64bit programs in the same environment, just like you can do on linux with multilib. The first compile of llvm produced 32bit binaries. When trying to compile llvm for 64bit, it was just impossible. I tried different things, like setting `CFLAGS`, `LDFLAGS`, `OTHER_OPTIONS`, whatever there was, and the only way to get it compiled for 64bit was to overwrite `CC` and `CXX`. It seems like the Makefile just ignores the CFLAGS and therefore only compiles the code for the host system's apparent bitness.
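The exact override is not given in the post; assuming gcc and its multilib `-m64` flag, it was presumably something along these lines:

    CC="gcc -m64" CXX="g++ -m64" ./configure
    gmake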
But both of these problems are solved in 3.2, which I tried from svn, and it works. The release date of 3.2 is only 7 days away, so hopefully it will still work by then. Nice thing is, [Rubinius](https://github.com/rubinius/rubinius) can already use it :D

View File

@ -5,140 +5,140 @@ author = "Gibheer"
draft = false
+++
I got an interesting question regarding zones on Solaris in #omnios.

> scarcry: Does anyone know how to move a zone from one zpool to another?
There are some guides out there on how to move a zone from one machine to
another, but most of them install the zone in the same place as before.
Instead of moving it from one machine to another, this small guide will just
show what to do when only the location is changing.
preparations
------------
First, we need to set up the partitions and zones for our little experiment. For
this example, I will use the pool `rpool` and the following partitions

* `rpool/zones/old` mounted to `/zones/old/`
* `rpool/zones/new` mounted to `/zones/new/`

We also need the zone config, so here it is.
    create -b
    set zonepath=/zones/old/zone1
    set ip-type=exclusive
    set autoboot=false
    add net
      set physical=zone1
    end
    commit
Just install the zone with the normal commands

    $ zonecfg -z zone1 < zone.config
    $ zoneadm -z zone1 install
    $ zoneadm -z zone1 boot
Check if the zone is running and write a file, just to make sure we have the
same zone at the end.
moving the zone
---------------
For this guide, we will assume that the zone is in production use and can't be
offline for too long. For that to work, we will take a first snapshot while the
zone is still running.
    $ zfs snapshot -r rpool/zones/old/zone1@move1

After that, we can replay that snapshot into the new location.

    $ zfs send -R rpool/zones/old/zone1@move1 | zfs recv rpool/zones/new/zone1
This step will take some time, depending on the size of your zone. Now we stop
the zone and detach it.

    $ zoneadm -z zone1 halt
    $ zoneadm -z zone1 detach
This frees the zfs partition from the zone and makes it accessible. We need that
a bit later.
Now we take an incremental snapshot and move that data to the new location.

    $ zfs snapshot -r rpool/zones/old/zone1@move2
    $ zfs send -R -i move1 rpool/zones/old/zone1@move2 | zfs recv rpool/zones/new/zone1
When we now list all zfs partitions, we see that the partition zbe is mounted
twice into the same location.

    rpool/zones/old/zone1/ROOT/zbe   724M  1.59T   723M  /zones/old/zone1/root
    rpool/zones/new/zone1/ROOT/zbe   724M  1.59T   723M  /zones/old/zone1/root
To fix that, issue the following command.

    zfs set mountpoint=/zones/new/zone1/root rpool/zones/new/zone1/ROOT/zbe

Now the partition has to be mounted, so that zoneadm can find it for the attach.
You can do that with the following command

    zfs mount rpool/zones/new/zone1/ROOT/zbe
Now with the partition in the correct place, we have to tell the zone where to
look for its new partition.

    $ zonecfg -z zone1
    zonecfg:zone1> set zonepath=/zones/new/zone1
    zonecfg:zone1> verify
    zonecfg:zone1> commit
    zonecfg:zone1> exit
With the zone reconfigured, attach the zone.

    $ zoneadm -z zone1 attach
This may take a bit of time, as the content of the zone gets checked for
compatibility. When it is done, check that the zone is installed.

    $ zoneadm list -cv
      ID NAME    STATUS     PATH               BRAND    IP
       - zone1   installed  /zones/new/zone1   ipkg     excl
Now boot the zone and we are done.

    $ zoneadm -z zone1 boot
Now check that everything is where you expect it to be, start your services, and
everything is good.
ideas
-----
Here are some ideas on what can be done differently in the process.
### **iterative snapshots**
If your zone has a lot of traffic, where many changes aggregate between the first
snapshot and the second, take some more iterative snapshots before taking down the
zone.
This has the advantage that you can shrink the gap of changes to a minimum
and therefore make the move at the end a bit faster - see the sketch below. But
check the available disk space in the process to avoid a full disk.
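One such intermediate round might look like this (the snapshot name `move1a` is
made up for illustration):

    $ zfs snapshot -r rpool/zones/old/zone1@move1a
    $ zfs send -R -i move1 rpool/zones/old/zone1@move1a | zfs recv rpool/zones/new/zone1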
### **create a new zone**
Instead of changing the old zone and therefore making a rollback more complicated,
create a new zone which looks exactly like the old one.
Instead of changing the old one, do instead

    $ zonecfg -z zone2
    zonecfg:zone2> create -a /zones/new/zone1
This will set up everything from the old zone with the new zonepath. Keep in mind
that this will also use the old interface. If you don't want that, create a new
interface beforehand and change it in the config step.
You can also restore the zfs partition into a partition which already has the
correct mountpoint set.
I hope it helps and you have some fun playing with it.

View File

@ -5,29 +5,29 @@ author = "Gibheer"
draft = false
+++
I had the need to filter logs from different programs into different places - in this case the postgres and nginx logs. The man page of `syslog.conf` describes it pretty well, but misses some examples to make it clearer. So here is how I configured it, to make it easier.
First, I edited the `syslog.conf`
    # filter everything apart from postgres and nginx
    !-postgres,nginx
    *.err;kern.warning;auth.notice;mail.crit    /dev/console
    # and all the other stuff

    # filter only postgres
    !postgres
    *.*    /var/log/postgresql.log

    # filter only nginx
    !nginx
    *.*    /var/log/nginx.log
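One step the post leaves implicit: syslogd has to reread its configuration before
the new rules apply. On FreeBSD that would presumably be

    service syslogd restart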
The next step is to set up the log rotation. This happens in `/etc/newsyslog.conf`. The man page is very helpful, so if you want to adjust something, take a peek into it.
    # postgresql
    /var/log/postgresql.log    640  5     100  *     JC
    # nginx
    /var/log/nginx.log         640  5     100  *     JC
And that is all. If you want to add more program filters, you have to define them in the `syslog.conf` as *notfilter* and *filter* and add the rotation to `newsyslog.conf`.

5
go.mod Normal file
View File

@ -0,0 +1,5 @@
module git.zero-knowledge.org/gibheer/zblog
go 1.18
require github.com/russross/blackfriday/v2 v2.1.0

2
go.sum Normal file
View File

@ -0,0 +1,2 @@
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=

217
main.go Normal file
View File

@ -0,0 +1,217 @@
package main
import (
"bytes"
"flag"
"fmt"
"html/template"
"io"
"io/fs"
"log"
"net/http"
"os"
"path"
"path/filepath"
"time"
"github.com/russross/blackfriday/v2"
)
var (
contentDir = flag.String("content-dir", "content", "path to the content directory")
staticDir = flag.String("static-dir", "static", "path to the static files")
templateDir = flag.String("template-dir", "templates", "path to the template directory")
outputDir = flag.String("output-dir", "", "path to output all files from the render process")
listen = flag.String("listen", "", "When provided with a listen port, start serving the content")
)
type (
Metadata struct {
URLPath string
FilePath string
Template string
Title string
Date time.Time
Author string
Draft bool
}
)
func main() {
flag.Parse()
var err error
tmplDirFS := os.DirFS(*templateDir)
templates := template.New("")
templates = templates.Funcs(template.FuncMap(
map[string]interface{}{
"formatTime": func(t time.Time) string {
return t.Format("2006-01-02")
},
},
))
templates, err = templates.ParseFS(tmplDirFS, "*")
if err != nil {
log.Fatalf("could not parse template files: %s", err)
}
content := []Metadata{}
if err := filepath.Walk(*contentDir, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
m, err := MetadataFromFile(*contentDir, path)
if err != nil {
return fmt.Errorf("could not parse metadata from '%s': %w", path, err)
}
content = append(content, m)
return nil
}); err != nil {
log.Fatalf("could not read content: %s", err)
}
if *outputDir != "" {
for _, metadata := range content {
p := *outputDir + metadata.URLPath
if p[len(p)-1] == '/' {
p = path.Join(p, "index.html")
}
// create directory
if _, err := os.Stat(path.Dir(p)); os.IsNotExist(err) {
if err := os.MkdirAll(path.Dir(p), 0755); err != nil {
log.Fatalf("could not create directory '%s': %s", path.Dir(p), err)
}
}
f, err := os.Create(p)
if err != nil {
log.Fatalf("could not create new file '%s': %s", p, err)
}
defer f.Close()
if err := metadata.Render(f, templates); err != nil {
log.Fatalf("could not render '%s': %s", metadata.FilePath, err)
}
f.Close()
}
}
if *listen != "" {
http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir(*staticDir))))
for _, metadata := range content {
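// pass the entry by value so every handler closes over its own metadata
// instead of the shared loop variable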
func(m Metadata) {
http.HandleFunc(m.URLPath, func(w http.ResponseWriter, r *http.Request) {
log.Printf("%s -> %s", r.URL, m.URLPath)
w.Header()["Content-Type"] = []string{"text/html"}
if err := m.Render(w, templates); err != nil {
log.Printf("could not render '%s': %s", m.FilePath, err)
}
})
}(metadata)
}
log.Fatalf("stopped listening: %s", http.ListenAndServe(*listen, nil))
}
if *outputDir == "" && *listen == "" {
log.Printf("neither output-dir nor listen are requested - doing nothing")
}
}
var (
metadataStart = []byte("+++\n")
metadataEnd = []byte("\n+++\n")
headerTitle = "title"
headerDate = "date"
headerAuthor = "author"
headerURLPath = "url"
headerDraft = "draft"
headerTemplate = "template"
)
// MetadataFromFile reads the header of the file to create the metadata.
//
// basePath is stripped from the path when generating the default URL path.
func MetadataFromFile(basePath string, path string) (Metadata, error) {
m := Metadata{
FilePath: path,
URLPath: path[len(basePath):],
Template: "content.html",
}
raw, err := os.ReadFile(m.FilePath)
if err != nil {
return m, err
}
if !bytes.HasPrefix(raw, metadataStart) {
return m, fmt.Errorf("missing metadata header, must start with +++")
}
last := bytes.Index(raw, metadataEnd)
if last == -1 {
return m, fmt.Errorf("missing metadata header, must end with +++ on a single line")
}
rawHeader := raw[len(metadataStart):last]
lineNum := 0
for _, headerLine := range bytes.Split(rawHeader, []byte("\n")) {
if len(headerLine) == 0 {
continue
}
line := bytes.SplitN(headerLine, []byte("="), 2)
if len(line) != 2 {
return m, fmt.Errorf("line %d: format must be 'key = value'", lineNum)
}
key := string(bytes.Trim(line[0], " "))
val := string(bytes.Trim(line[1], ` "'`))
switch string(key) {
case headerTitle:
m.Title = val
case headerAuthor:
m.Author = val
case headerDraft:
if val == "true" {
m.Draft = true
}
case headerTemplate:
m.Template = val
case headerDate:
m.Date, err = time.Parse(time.RFC3339, val)
if err != nil {
log.Printf("line %d: date must match RFC3339 format", lineNum)
}
case headerURLPath:
m.URLPath = val
default:
log.Printf("line %d: unknown header %s found in %s", lineNum, key, path)
}
lineNum += 1
}
return m, nil
}
func (m Metadata) Content() template.HTML {
result := ""
raw, err := os.ReadFile(m.FilePath)
if err != nil {
log.Printf("error reading file: %w", err)
return template.HTML("")
}
end := bytes.Index(raw, metadataEnd)
if end == -1 {
log.Printf("could not find metadata end")
return template.HTML("")
}
result = string(blackfriday.Run(raw[end+len(metadataEnd):]))
return template.HTML(result)
}
func (m Metadata) Render(w io.Writer, tmpl *template.Template) error {
if err := tmpl.ExecuteTemplate(w, m.Template, m); err != nil {
return fmt.Errorf("could not render content path '%s': %w", m.FilePath, err)
}
return nil
}

Binary file not shown.

Binary file not shown.

Binary file not shown.

BIN
static/go-mono/Go-Mono.ttf Normal file

Binary file not shown.

36
static/go-mono/README Normal file
View File

@ -0,0 +1,36 @@
These fonts were created by the Bigelow & Holmes foundry specifically for the
Go project. See https://blog.golang.org/go-fonts for details.
They are licensed under the same open source license as the rest of the Go
project's software:
Copyright (c) 2016 Bigelow & Holmes Inc.. All rights reserved.
Distribution of this font is governed by the following license. If you do not
agree to this license, including the disclaimer, do not distribute or modify
this font.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Google Inc. nor the names of its contributors may be
used to endorse or promote products derived from this software without
specific prior written permission.
DISCLAIMER: THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

(image: 40 KiB before, 40 KiB after)

14
templates/content.html Normal file
View File

@ -0,0 +1,14 @@
{{ template "header.html" . }}
<main>
<h1>{{ .Title }}</h1>
<aside>{{ if .Author }}by
<a href="/author/{{ .Author }}" class="author">{{ .Author }}</a>
- {{ end }}from
<span class="date">{{ formatTime .Date }}</span>
</aside>
{{ .Content }}
</main>
<footer>
<a href="#top">top</a>
</footer>
{{ template "footer.html" . }}

1
templates/footer.html Normal file
View File

@ -0,0 +1 @@
</body></html>

37
templates/header.html Normal file
View File

@ -0,0 +1,37 @@
<!DOCTYPE html>
<html>
<head>
<title>zero-knowledge - {{ .Title }}</title>
<style type="text/css">
:root {
--bg-color: hsl(204, 35%, 20%);
--bg-code-color: hsl(204, 10%, 20%);
--tx-color: hsl(34, 5%, 80%);
--hi-color: hsl(34, 100%, 50%);
--lo-color: hsl(15, 100%, 50%);
}
body { background: var(--bg-color);
color: var(--tx-color);
font-size: 100%; }
a { color: var(--hi-color); }
a:visited { color: var(--lo-color); }
main { max-width: 75%; margin-left: auto; margin-right: auto; }
main aside { width: 100%;
border-top: 0.1em solid var(--hi-color);
padding-top: 0.5em;
padding-left: 0.5em; }
pre { margin: 0; padding: 1em; background: var(--bg-code-color); }
code { margin: 0; padding: 0; font-size: 100%; }
pre, code { font-family: GoMonoRegular, monospace; }
@font-face { font-family: 'GoMonoRegular';
src: url('/static/go-mono/GoMonoRegular.ttf') format('truetype');
font-weight: normal; font-style: normal; }
</style>
</head>
<body>
<header>
<a href="/" class="logo" id="top">
<img src="/static/zero-knowledge.png" alt="zero-knowledge" />
</a>
</header>

8
templates/index.html Normal file
View File

@ -0,0 +1,8 @@
{{ template "header.html" . }}
<main>
{{ .Content }}
</main>
<footer>
<a href="#top">top</a>
</footer>
{{ template "footer.html" . }}

View File

@ -1,3 +0,0 @@
{{ partial "header.html" . }}
{{ partial "li" .Paginator }}
{{ partial "footer.html" . }}

View File

@ -1,28 +0,0 @@
<div class="entry">
<nav class="pagination">
{{ if .NextInSection }}
<a href="{{ .NextInSection.Permalink }}">newer</a>
{{ else }}
<a href="" class="deactivated">newer</a>
{{ end }}
</nav>
<header>
<h1><a href="{{ .Permalink }}">{{ .Title }}</a></h1>
</header>
<aside>
{{ if .Params.author }}
<a class="author" href="{{ .Site.BaseURL }}/author/{{ lower .Params.author }}">{{ .Params.author }}</a>
{{ end }}
<span class="date">{{ dateFormat "2006-01-02 15:04" .Date }}</span>
</aside>
<section>
{{ .Content }}
</section>
<nav class="pagination">
{{ if .PrevInSection }}
<a href="{{ .PrevInSection.Permalink }}">older</a>
{{ else }}
<a href="" class="deactivated">older</a>
{{ end }}
</nav>
</div>

View File

@ -1,3 +0,0 @@
{{ partial "header.html" . }}
{{ .Render "si" }}
{{ partial "footer.html" . }}

View File

@ -1,11 +0,0 @@
<div class="entry short">
<header>
<h1><a href="{{ .Permalink }}">{{ .Title }}</a></h1>
</header>
<aside>
{{ if .Truncated }}<a class="more" href="{{ .Permalink }}">read more</a>{{ end }}
</aside>
<section class="summary">
{{ .Content }}
</section>
</div>

View File

@ -1,3 +0,0 @@
{{ partial "header.html" . }}
{{ partial "li.html" (.Paginate (where .Data.Pages "Section" "post")) }}
{{ partial "footer.html" . }}

View File

@ -1,2 +0,0 @@
</body>
</html>

View File

@ -1,15 +0,0 @@
<!DOCTYPE html>
<html>
<head>
<title>{{ if eq .URL "/" }}{{ .Site.Title }}{{ else }}{{ .Title }} - {{ .Site.Title }}{{ end }}</title>
<meta content="text/html; charset=utf-8" http-equiv="Content-Type">
<meta name="theme-color" content="#3a5f78">
{{ if .RSSlink }}
<link href="{{ .RSSlink }}" rel="alternate" title="RSS 2.0" type="application/rss+xml" />
{{ end }}
<link href="{{ .Site.BaseURL }}/css/style.css" rel="stylesheet" type="text/css"></link>
</head>
<body>
<header>
<h1><a href="{{ .Site.BaseURL }}/">{{ .Site.Title }}</a></h1>
</header>

View File

@ -1,21 +0,0 @@
<nav class="pagination">
{{ if .HasPrev }}
<a href="{{ .Prev.URL }}">newer</a>
{{ else }}
<a href="" class="deactivated">newer</a>
{{ end }}
</nav>
<ul class="entries">
{{ range .Pages }}
<li>
{{ .Render "summary" }}
</li>
{{ end }}
</ul>
<nav class="pagination">
{{ if .HasNext }}
<a href="{{ .Next.URL }}">older</a>
{{ else }}
<a href="" class="deactivated">older</a>
{{ end }}
</nav>

View File

@ -1,15 +0,0 @@
<article class="entry short">
<header>
<h1><a href="{{.Permalink }}">{{ .Title }}</a></h1>
</header>
<nav>
{{ if .Truncated }}<a class="more" href="{{ .Permalink }}">read more</a>{{ end }}
</nav>
<aside>
<a class="author" href="{{ .Site.BaseURL }}/author/{{ lower .Params.author }}">{{ .Params.author }}</a>
<span class="date">{{ dateFormat "2006-01-02 15:04" .Date }}</span>
</aside>
<section class="summary">
{{ .Summary }}
</section>
</div>

View File

@ -1,154 +0,0 @@
* {
box-sizing: border-box;
margin: 0;
padding: 0;
}
body > header > h1 {
margin: 1em;
}
body > header > h1 > a {
color: transparent;
display: block;
width: 465px;
height: 117px;
background: url('/img/zero-knowledge.png');
}
.entries {
list-style-type: none;
display: flex;
flex-flow: row wrap;
justify-content: space-around;
align-items: stretch;
}
.entries > li {
margin-top: 0.5em;
margin-bottom: 0.5em;
padding: 1em;
}
body {
background-image: url('/img/background.png');
}
@media screen and ((max-width: 750px) or (orientation: portait)) {
.entries > li {
width: 100%;
}
}
@media screen and (min-width: 750px) and (orientation: landscape) {
.entries > li {
width: 49%;
}
}
@media screen and (min-width: 1000px) and (orientation: landscape) {
.entries > li {
width: 32%;
}
}
@media screen and (min-width: 1450px) and (orientation: landscape) {
.entries > li {
width: 24%;
}
}
nav.pagination > a {
display: block;
width: 100%;
text-align: center;
padding: 0.5em;
font-weight: bold;
color: #ff9900;
background-color: #3a5f78;
}
nav.pagination > a.deactivated {
color: transparent;
}
article {
display: flex;
flex-direction: column;
}
article > header {
order: 1;
}
article > nav {
order: 4;
}
article > aside {
order: 2;
}
article > section {
order: 3;
}
article > header > h1, .entry > header > h1 {
border-bottom: 0.1em solid #ff9900;
}
article > header > h1 > *, .entry > header > h1 > * {
text-decoration: none;
font-size: 1.25rem;
color: #ff9900;
}
article > aside, .entry > aside {
margin-bottom: 0.5em;
}
article > aside > *, .entry > aside > * {
color: #3a5f78;
font-weight: bold;
font-size: 0.8rem;
}
article > aside > .author::before, .entry > aside > .author::before {
content: 'by ';
}
article > aside > .date::before, .entry > aside > .date::before {
content: 'on ';
}
article > section {
}
article > nav > a {
font-size: 0.8rem;
color: #3a5f78;
font-weight: bold;
}
article > nav > .more::after {
content: ' >';
}
body > .entry > header {
margin-top: 2em;
}
.entry > header > h1, .entry > aside {
padding-left: 1em;
}
.entry > section {
margin: auto;
max-width: 50rem;
margin-bottom: 1em;
}
.entry > section h1, .entry > section h2 {
margin-top: 1em;
margin-bottom: 0.5em;
font-size: 1.25rem;
color: #3a5f78;
border-bottom: 0.1em solid #3a5f78;
}
.entry > section pre {
margin: 0.5em;
padding: 0.5em;
background-color: rgba(58, 95, 120, 0.3);
overflow-x: auto;
}
.entry > section ul, .entry > section dl, .entry > section ol {
margin: 0.5em;
margin-left: 1.5em;
}
section > p {
margin-bottom: 1.00rem;
}

Binary file not shown.

(image removed, 84 KiB)

View File

@ -1,8 +0,0 @@
========================================================
This pattern is downloaded from www.subtlepatterns.com
If you need more, that's where to get'em.
========================================================

8
vendor/github.com/russross/blackfriday/v2/.gitignore generated vendored Normal file
View File

@ -0,0 +1,8 @@
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags

17
vendor/github.com/russross/blackfriday/v2/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,17 @@
sudo: false
language: go
go:
- "1.10.x"
- "1.11.x"
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v ./...

29
vendor/github.com/russross/blackfriday/v2/LICENSE.txt generated vendored Normal file
View File

@ -0,0 +1,29 @@
Blackfriday is distributed under the Simplified BSD License:
> Copyright © 2011 Russ Ross
> All rights reserved.
>
> Redistribution and use in source and binary forms, with or without
> modification, are permitted provided that the following conditions
> are met:
>
> 1. Redistributions of source code must retain the above copyright
> notice, this list of conditions and the following disclaimer.
>
> 2. Redistributions in binary form must reproduce the above
> copyright notice, this list of conditions and the following
> disclaimer in the documentation and/or other materials provided with
> the distribution.
>
> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
> POSSIBILITY OF SUCH DAMAGE.

335
vendor/github.com/russross/blackfriday/v2/README.md generated vendored Normal file
View File

@ -0,0 +1,335 @@
Blackfriday
[![Build Status][BuildV2SVG]][BuildV2URL]
[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL]
===========
Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
is paranoid about its input (so you can safely feed it user-supplied
data), it is fast, it supports common extensions (tables, smart
punctuation substitutions, etc.), and it is safe for all utf-8
(unicode) input.
HTML output is currently supported, along with Smartypants
extensions.
It started as a translation from C of [Sundown][3].
Installation
------------
Blackfriday is compatible with modern Go releases in module mode.
With Go installed:
go get github.com/russross/blackfriday/v2
will resolve and add the package to the current development module,
then build and install it. Alternatively, you can achieve the same
if you import it in a package:
import "github.com/russross/blackfriday/v2"
and `go get` without parameters.
Legacy GOPATH mode is unsupported.
Versions
--------
Currently maintained and recommended version of Blackfriday is `v2`. It's being
developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
documentation is available at
https://pkg.go.dev/github.com/russross/blackfriday/v2.
It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`.
Version 2 offers a number of improvements over v1:
* Cleaned up API
* A separate call to [`Parse`][4], which produces an abstract syntax tree for
the document
* Latest bug fixes
* Flexibility to easily add your own rendering extensions
Potential drawbacks:
* Our benchmarks show v2 to be slightly slower than v1. Currently in the
ballpark of around 15%.
* API breakage. If you can't afford modifying your code to adhere to the new API
and don't care too much about the new features, v2 is probably not for you.
* Several bug fixes are trailing behind and still need to be forward-ported to
v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
tracking.
If you are still interested in the legacy `v1`, you can import it from
`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
here: https://pkg.go.dev/github.com/russross/blackfriday.
Usage
-----
For the most sensible markdown processing, it is as simple as getting your input
into a byte slice and calling:
```go
output := blackfriday.Run(input)
```
Your input will be parsed and the output rendered with a set of most popular
extensions enabled. If you want the most basic feature set, corresponding with
the bare Markdown specification, use:
```go
output := blackfriday.Run(input, blackfriday.WithNoExtensions())
```
### Sanitize untrusted content
Blackfriday itself does nothing to protect against malicious content. If you are
dealing with user-supplied markdown, we recommend running Blackfriday's output
through HTML sanitizer such as [Bluemonday][5].
Here's an example of simple usage of Blackfriday together with Bluemonday:
```go
import (
"github.com/microcosm-cc/bluemonday"
"github.com/russross/blackfriday/v2"
)
// ...
unsafe := blackfriday.Run(input)
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
```
### Custom options
If you want to customize the set of options, use `blackfriday.WithExtensions`,
`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
### `blackfriday-tool`
You can also check out `blackfriday-tool` for a more complete example
of how to use it. Download and install it using:
go get github.com/russross/blackfriday-tool
This is a simple command-line tool that allows you to process a
markdown file using a standalone program. You can also browse the
source directly on github if you are just looking for some example
code:
* <https://github.com/russross/blackfriday-tool>
Note that if you have not already done so, installing
`blackfriday-tool` will be sufficient to download and install
blackfriday in addition to the tool itself. The tool binary will be
installed in `$GOPATH/bin`. This is a statically-linked binary that
can be copied to wherever you need it without worrying about
dependencies and library versions.
### Sanitized anchor names
Blackfriday includes an algorithm for creating sanitized anchor names
corresponding to a given input text. This algorithm is used to create
anchors for headings when `AutoHeadingIDs` extension is enabled. The
algorithm has a specification, so that other packages can create
compatible anchor names and links to those anchors.
The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names.
[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to
create compatible links to the anchor names generated by blackfriday.
This algorithm is also implemented in a small standalone package at
[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
that want a small package and don't need full functionality of blackfriday.
Features
--------
All features of Sundown are supported, including:
* **Compatibility**. The Markdown v1.0.3 test suite passes with
the `--tidy` option. Without `--tidy`, the differences are
mostly in whitespace and entity escaping, where blackfriday is
more consistent and cleaner.
* **Common extensions**, including table support, fenced code
blocks, autolinks, strikethroughs, non-strict emphasis, etc.
* **Safety**. Blackfriday is paranoid when parsing, making it safe
to feed untrusted user input without fear of bad things
happening. The test suite stress tests this and there are no
known inputs that make it crash. If you find one, please let me
know and send me the input that does it.
NOTE: "safety" in this context means *runtime safety only*. In order to
protect yourself against JavaScript injection in untrusted content, see
[this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
* **Fast processing**. It is fast enough to render on-demand in
most web applications without having to cache the output.
* **Thread safety**. You can run multiple parsers in different
goroutines without ill effect. There is no dependence on global
shared state.
* **Minimal dependencies**. Blackfriday only depends on standard
library packages in Go. The source code is pretty
self-contained, so it is easy to add to any project, including
Google App Engine projects.
* **Standards compliant**. Output successfully validates using the
W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
Extensions
----------
In addition to the standard markdown syntax, this package
implements the following extensions:
* **Intra-word emphasis suppression**. The `_` character is
commonly used inside words when discussing code, so having
markdown interpret it as an emphasis command is usually the
wrong thing. Blackfriday lets you treat all emphasis markers as
normal characters when they occur inside a word.
* **Tables**. Tables can be created by drawing them in the input
using a simple syntax:
```
Name | Age
--------|------
Bob | 27
Alice | 23
```
* **Fenced code blocks**. In addition to the normal 4-space
indentation to mark code blocks, you can explicitly mark them
and supply a language (to make syntax highlighting simple). Just
mark it like this:
```go
func getTrue() bool {
return true
}
```
You can use 3 or more backticks to mark the beginning of the
block, and the same number to mark the end of the block.
To preserve classes of fenced code blocks while using the bluemonday
HTML sanitizer, use the following policy:
```go
p := bluemonday.UGCPolicy()
p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
html := p.SanitizeBytes(unsafe)
```
* **Definition lists**. A simple definition list is made of a single-line
term followed by a colon and the definition for that term.
Cat
: Fluffy animal everyone likes
Internet
: Vector of transmission for pictures of cats
Terms must be separated from the previous definition by a blank line.
* **Footnotes**. A marker in the text that will become a superscript number;
a footnote definition that will be placed in a list of footnotes at the
end of the document. A footnote looks like this:
This is a footnote.[^1]
[^1]: the footnote text.
* **Autolinking**. Blackfriday can find URLs that have not been
explicitly marked as links and turn them into links.
* **Strikethrough**. Use two tildes (`~~`) to mark text that
should be crossed out.
* **Hard line breaks**. With this extension enabled newlines in the input
translate into line breaks in the output. This extension is off by default.
* **Smart quotes**. Smartypants-style punctuation substitution is
supported, turning normal double- and single-quote marks into
curly quotes, etc.
* **LaTeX-style dash parsing** is an additional option, where `--`
is translated into `&ndash;`, and `---` is translated into
`&mdash;`. This differs from most smartypants processors, which
turn a single hyphen into an ndash and a double hyphen into an
mdash.
* **Smart fractions**, where anything that looks like a fraction
is translated into suitable HTML (instead of just a few special
cases like most smartypant processors). For example, `4/5`
becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
<sup>4</sup>&frasl;<sub>5</sub>.
Other renderers
---------------
Blackfriday is structured to allow alternative rendering engines. Here
are a few of note:
* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
provides a GitHub Flavored Markdown renderer with fenced code block
highlighting, clickable heading anchor links.
It's not customizable, and its goal is to produce HTML output
equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
except the rendering is performed locally.
* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
but for markdown.
* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
renders output as LaTeX.
* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
integration with the [Chroma](https://github.com/alecthomas/chroma) code
highlighting library. bfchroma is only compatible with v2 of Blackfriday and
provides a drop-in renderer ready to use with Blackfriday, as well as
options and means for further customization.
* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style.
TODO
----
* More unit testing
* Improve Unicode support. It does not understand all Unicode
rules (about what constitutes a letter, a punctuation symbol,
etc.), so it may fail to detect word boundaries correctly in
some instances. It is safe on all UTF-8 input.
License
-------
[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
[1]: https://daringfireball.net/projects/markdown/ "Markdown"
[2]: https://golang.org/ "Go Language"
[3]: https://github.com/vmg/sundown "Sundown"
[4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
[5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
[BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
[BuildV2URL]: https://travis-ci.org/russross/blackfriday
[PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
[PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2

vendor/github.com/russross/blackfriday/v2/block.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/russross/blackfriday/v2/doc.go generated vendored Normal file

@ -0,0 +1,46 @@
// Package blackfriday is a markdown processor.
//
// It translates plain text with simple formatting rules into an AST, which can
// then be further processed to HTML (provided by Blackfriday itself) or other
// formats (provided by the community).
//
// The simplest way to invoke Blackfriday is to call the Run function. It will
// take a text input and produce a text output in HTML (or other format).
//
// A slightly more sophisticated way to use Blackfriday is to create a Markdown
// processor and to call Parse, which returns a syntax tree for the input
// document. You can leverage Blackfriday's parsing for content extraction from
// markdown documents. You can assign a custom renderer and set various options
// to the Markdown processor.
//
// If you're interested in calling Blackfriday from command line, see
// https://github.com/russross/blackfriday-tool.
//
// Sanitized Anchor Names
//
// Blackfriday includes an algorithm for creating sanitized anchor names
// corresponding to a given input text. This algorithm is used to create
// anchors for headings when AutoHeadingIDs extension is enabled. The
// algorithm is specified below, so that other packages can create
// compatible anchor names and links to those anchors.
//
// The algorithm iterates over the input text, interpreted as UTF-8,
// one Unicode code point (rune) at a time. All runes that are letters (category L)
// or numbers (category N) are considered valid characters. They are mapped to
// lower case, and included in the output. All other runes are considered
// invalid characters. Invalid characters that precede the first valid character,
// as well as invalid characters that follow the last valid character,
// are dropped completely. All other sequences of invalid characters
// between two valid characters are replaced with a single dash character '-'.
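//
// For example, under these rules the heading text "Header Links & Anchors!"
// yields the anchor name "header-links-anchors".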
//
// SanitizedAnchorName exposes this functionality, and can be used to
// create compatible links to the anchor names generated by blackfriday.
// This algorithm is also implemented in a small standalone package at
// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
// that want a small package and don't need full functionality of blackfriday.
package blackfriday
// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
// github.com/shurcooL/sanitized_anchor_name.
// Otherwise, users of sanitized_anchor_name will get anchor names
// that are incompatible with those generated by blackfriday.

vendor/github.com/russross/blackfriday/v2/entities.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/russross/blackfriday/v2/esc.go generated vendored Normal file

@ -0,0 +1,70 @@
package blackfriday
import (
"html"
"io"
)
var htmlEscaper = [256][]byte{
'&': []byte("&amp;"),
'<': []byte("&lt;"),
'>': []byte("&gt;"),
'"': []byte("&quot;"),
}
func escapeHTML(w io.Writer, s []byte) {
escapeEntities(w, s, false)
}
func escapeAllHTML(w io.Writer, s []byte) {
escapeEntities(w, s, true)
}
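// escapeEntities writes s to w, escaping '&', '<', '>' and '"' as HTML
// entities. When escapeValidEntities is false, byte runs that already form
// a valid named entity (e.g. "&amp;") are written through unchanged.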
func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) {
var start, end int
for end < len(s) {
escSeq := htmlEscaper[s[end]]
if escSeq != nil {
isEntity, entityEnd := nodeIsEntity(s, end)
if isEntity && !escapeValidEntities {
w.Write(s[start : entityEnd+1])
start = entityEnd + 1
} else {
w.Write(s[start:end])
w.Write(escSeq)
start = end + 1
}
}
end++
}
if start < len(s) && end <= len(s) {
w.Write(s[start:end])
}
}
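// nodeIsEntity reports whether the byte at s[end] begins a known named HTML
// entity such as "&amp;"; when it does, endEntityPos is the index of the
// terminating ';'.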
func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) {
isEntity = false
endEntityPos = end + 1
if s[end] == '&' {
for endEntityPos < len(s) {
if s[endEntityPos] == ';' {
if entities[string(s[end:endEntityPos+1])] {
isEntity = true
break
}
}
if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' {
break
}
endEntityPos++
}
}
return isEntity, endEntityPos
}
func escLink(w io.Writer, text []byte) {
unesc := html.UnescapeString(string(text))
escapeHTML(w, []byte(unesc))
}

vendor/github.com/russross/blackfriday/v2/html.go generated vendored Normal file

@ -0,0 +1,952 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// HTML rendering backend
//
//
package blackfriday
import (
"bytes"
"fmt"
"io"
"regexp"
"strings"
)
// HTMLFlags control optional behavior of HTML renderer.
type HTMLFlags int
// HTML renderer configuration options.
const (
HTMLFlagsNone HTMLFlags = 0
SkipHTML HTMLFlags = 1 << iota // Skip preformatted HTML blocks
SkipImages // Skip embedded images
SkipLinks // Skip all links
Safelink // Only link to trusted protocols
NofollowLinks // Only link with rel="nofollow"
NoreferrerLinks // Only link with rel="noreferrer"
NoopenerLinks // Only link with rel="noopener"
HrefTargetBlank // Add a blank target
CompletePage // Generate a complete HTML page
UseXHTML // Generate XHTML output instead of HTML
FootnoteReturnLinks // Generate a link at the end of a footnote to return to the source
Smartypants // Enable smart punctuation substitutions
SmartypantsFractions // Enable smart fractions (with Smartypants)
SmartypantsDashes // Enable smart dashes (with Smartypants)
SmartypantsLatexDashes // Enable LaTeX-style dashes (with Smartypants)
SmartypantsAngledQuotes // Enable angled double quotes (with Smartypants) for double quotes rendering
SmartypantsQuotesNBSP // Enable « French guillemets » (with Smartypants)
TOC // Generate a table of contents
)
var (
htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
)
const (
htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
processingInstruction + "|" + declaration + "|" + cdata + ")"
closeTag = "</" + tagName + "\\s*[>]"
openTag = "<" + tagName + attribute + "*" + "\\s*/?>"
attribute = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
attributeValue = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
attributeValueSpec = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
attributeName = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
cdata = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
declaration = "<![A-Z]+" + "\\s+[^>]*>"
doubleQuotedValue = "\"[^\"]*\""
htmlComment = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
processingInstruction = "[<][?].*?[?][>]"
singleQuotedValue = "'[^']*'"
tagName = "[A-Za-z][A-Za-z0-9-]*"
unquotedValue = "[^\"'=<>`\\x00-\\x20]+"
)
// HTMLRendererParameters is a collection of supplementary parameters tweaking
// the behavior of various parts of HTML renderer.
type HTMLRendererParameters struct {
// Prepend this text to each relative URL.
AbsolutePrefix string
// Add this text to each footnote anchor, to ensure uniqueness.
FootnoteAnchorPrefix string
// Show this text inside the <a> tag for a footnote return link, if the
// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
// <sup>[return]</sup> is used.
FootnoteReturnLinkContents string
// If set, add this text to the front of each Heading ID, to ensure
// uniqueness.
HeadingIDPrefix string
// If set, add this text to the back of each Heading ID, to ensure uniqueness.
HeadingIDSuffix string
// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
// Negative offset is also valid.
// Resulting levels are clipped between 1 and 6.
HeadingLevelOffset int
Title string // Document title (used if CompletePage is set)
CSS string // Optional CSS file URL (used if CompletePage is set)
Icon string // Optional icon file URL (used if CompletePage is set)
Flags HTMLFlags // Flags allow customizing this renderer's behavior
}
// HTMLRenderer is a type that implements the Renderer interface for HTML output.
//
// Do not create this directly, instead use the NewHTMLRenderer function.
type HTMLRenderer struct {
HTMLRendererParameters
closeTag string // how to end singleton tags: either " />" or ">"
// Track heading IDs to prevent ID collision in a single generation.
headingIDs map[string]int
lastOutputLen int
disableTags int
sr *SPRenderer
}
const (
xhtmlClose = " />"
htmlClose = ">"
)
// NewHTMLRenderer creates and configures an HTMLRenderer object, which
// satisfies the Renderer interface.
func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
// configure the rendering engine
closeTag := htmlClose
if params.Flags&UseXHTML != 0 {
closeTag = xhtmlClose
}
if params.FootnoteReturnLinkContents == "" {
// U+FE0E is VARIATION SELECTOR-15.
// It suppresses automatic emoji presentation of the preceding
// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
params.FootnoteReturnLinkContents = "<span aria-label='Return'>↩\ufe0e</span>"
}
return &HTMLRenderer{
HTMLRendererParameters: params,
closeTag: closeTag,
headingIDs: make(map[string]int),
sr: NewSmartypantsRenderer(params.Flags),
}
}
func isHTMLTag(tag []byte, tagname string) bool {
found, _ := findHTMLTagPos(tag, tagname)
return found
}
// Look for a character, ignoring any occurrence inside single, double, or
// backtick quotes, since the quoted content might be JavaScript.
func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
inSingleQuote := false
inDoubleQuote := false
inGraveQuote := false
i := start
for i < len(html) {
switch {
case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
return i
case html[i] == '\'':
inSingleQuote = !inSingleQuote
case html[i] == '"':
inDoubleQuote = !inDoubleQuote
case html[i] == '`':
inGraveQuote = !inGraveQuote
}
i++
}
return start
}
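// findHTMLTagPos reports whether tag holds an HTML tag (opening or closing)
// whose name is tagname and, if so, returns the position of its closing '>'.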
func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
i := 0
if i < len(tag) && tag[0] != '<' {
return false, -1
}
i++
i = skipSpace(tag, i)
if i < len(tag) && tag[i] == '/' {
i++
}
i = skipSpace(tag, i)
j := 0
for ; i < len(tag); i, j = i+1, j+1 {
if j >= len(tagname) {
break
}
if strings.ToLower(string(tag[i]))[0] != tagname[j] {
return false, -1
}
}
if i == len(tag) {
return false, -1
}
rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
if rightAngle >= i {
return true, rightAngle
}
return false, -1
}
func skipSpace(tag []byte, i int) int {
for i < len(tag) && isspace(tag[i]) {
i++
}
return i
}
func isRelativeLink(link []byte) (yes bool) {
	// a link beginning with '#' is a fragment anchor
if link[0] == '#' {
return true
}
	// a link beginning with '/' but not '//'; the latter may be a protocol-relative link
if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
return true
}
// only the root '/'
if len(link) == 1 && link[0] == '/' {
return true
}
// current directory : begin with "./"
if bytes.HasPrefix(link, []byte("./")) {
return true
}
// parent directory : begin with "../"
if bytes.HasPrefix(link, []byte("../")) {
return true
}
return false
}
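// ensureUniqueHeadingID returns id unchanged the first time it is seen and
// appends a numeric suffix (e.g. "id-1") on later occurrences, so repeated
// headings don't produce colliding anchors within a single document.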
func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
tmp := fmt.Sprintf("%s-%d", id, count+1)
if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
r.headingIDs[id] = count + 1
id = tmp
} else {
id = id + "-1"
}
}
if _, found := r.headingIDs[id]; !found {
r.headingIDs[id] = 0
}
return id
}
func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
newDest := r.AbsolutePrefix
if link[0] != '/' {
newDest += "/"
}
newDest += string(link)
return []byte(newDest)
}
return link
}
func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
if isRelativeLink(link) {
return attrs
}
val := []string{}
if flags&NofollowLinks != 0 {
val = append(val, "nofollow")
}
if flags&NoreferrerLinks != 0 {
val = append(val, "noreferrer")
}
if flags&NoopenerLinks != 0 {
val = append(val, "noopener")
}
if flags&HrefTargetBlank != 0 {
attrs = append(attrs, "target=\"_blank\"")
}
if len(val) == 0 {
return attrs
}
attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
return append(attrs, attr)
}
func isMailto(link []byte) bool {
return bytes.HasPrefix(link, []byte("mailto:"))
}
func needSkipLink(flags HTMLFlags, dest []byte) bool {
if flags&SkipLinks != 0 {
return true
}
return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
}
func isSmartypantable(node *Node) bool {
pt := node.Parent.Type
return pt != Link && pt != CodeBlock && pt != Code
}
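// appendLanguageAttr appends a class="language-<lang>" attribute built from
// the first word of a fenced code block's info string, if any.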
func appendLanguageAttr(attrs []string, info []byte) []string {
if len(info) == 0 {
return attrs
}
endOfLang := bytes.IndexAny(info, "\t ")
if endOfLang < 0 {
endOfLang = len(info)
}
return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
}
func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
w.Write(name)
if len(attrs) > 0 {
w.Write(spaceBytes)
w.Write([]byte(strings.Join(attrs, " ")))
}
w.Write(gtBytes)
r.lastOutputLen = 1
}
func footnoteRef(prefix string, node *Node) []byte {
urlFrag := prefix + string(slugify(node.Destination))
anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
}
func footnoteItem(prefix string, slug []byte) []byte {
return []byte(fmt.Sprintf(`<li id="fn:%s%s">`, prefix, slug))
}
func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
const format = ` <a class="footnote-return" href="#fnref:%s%s">%s</a>`
return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
}
func itemOpenCR(node *Node) bool {
if node.Prev == nil {
return false
}
ld := node.Parent.ListData
return !ld.Tight && ld.ListFlags&ListTypeDefinition == 0
}
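// skipParagraphTags reports whether the <p> wrapper should be omitted for a
// paragraph that sits inside a tight list item or a definition-list term.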
func skipParagraphTags(node *Node) bool {
grandparent := node.Parent.Parent
if grandparent == nil || grandparent.Type != List {
return false
}
tightOrTerm := grandparent.Tight || node.Parent.ListFlags&ListTypeTerm != 0
return grandparent.Type == List && tightOrTerm
}
func cellAlignment(align CellAlignFlags) string {
switch align {
case TableAlignmentLeft:
return "left"
case TableAlignmentRight:
return "right"
case TableAlignmentCenter:
return "center"
default:
return ""
}
}
func (r *HTMLRenderer) out(w io.Writer, text []byte) {
if r.disableTags > 0 {
w.Write(htmlTagRe.ReplaceAll(text, []byte{}))
} else {
w.Write(text)
}
r.lastOutputLen = len(text)
}
func (r *HTMLRenderer) cr(w io.Writer) {
if r.lastOutputLen > 0 {
r.out(w, nlBytes)
}
}
var (
nlBytes = []byte{'\n'}
gtBytes = []byte{'>'}
spaceBytes = []byte{' '}
)
var (
brTag = []byte("<br>")
brXHTMLTag = []byte("<br />")
emTag = []byte("<em>")
emCloseTag = []byte("</em>")
strongTag = []byte("<strong>")
strongCloseTag = []byte("</strong>")
delTag = []byte("<del>")
delCloseTag = []byte("</del>")
ttTag = []byte("<tt>")
ttCloseTag = []byte("</tt>")
aTag = []byte("<a")
aCloseTag = []byte("</a>")
preTag = []byte("<pre>")
preCloseTag = []byte("</pre>")
codeTag = []byte("<code>")
codeCloseTag = []byte("</code>")
pTag = []byte("<p>")
pCloseTag = []byte("</p>")
blockquoteTag = []byte("<blockquote>")
blockquoteCloseTag = []byte("</blockquote>")
hrTag = []byte("<hr>")
hrXHTMLTag = []byte("<hr />")
ulTag = []byte("<ul>")
ulCloseTag = []byte("</ul>")
olTag = []byte("<ol>")
olCloseTag = []byte("</ol>")
dlTag = []byte("<dl>")
dlCloseTag = []byte("</dl>")
liTag = []byte("<li>")
liCloseTag = []byte("</li>")
ddTag = []byte("<dd>")
ddCloseTag = []byte("</dd>")
dtTag = []byte("<dt>")
dtCloseTag = []byte("</dt>")
tableTag = []byte("<table>")
tableCloseTag = []byte("</table>")
tdTag = []byte("<td")
tdCloseTag = []byte("</td>")
thTag = []byte("<th")
thCloseTag = []byte("</th>")
theadTag = []byte("<thead>")
theadCloseTag = []byte("</thead>")
tbodyTag = []byte("<tbody>")
tbodyCloseTag = []byte("</tbody>")
trTag = []byte("<tr>")
trCloseTag = []byte("</tr>")
h1Tag = []byte("<h1")
h1CloseTag = []byte("</h1>")
h2Tag = []byte("<h2")
h2CloseTag = []byte("</h2>")
h3Tag = []byte("<h3")
h3CloseTag = []byte("</h3>")
h4Tag = []byte("<h4")
h4CloseTag = []byte("</h4>")
h5Tag = []byte("<h5")
h5CloseTag = []byte("</h5>")
h6Tag = []byte("<h6")
h6CloseTag = []byte("</h6>")
footnotesDivBytes = []byte("\n<div class=\"footnotes\">\n\n")
footnotesCloseDivBytes = []byte("\n</div>\n")
)
func headingTagsFromLevel(level int) ([]byte, []byte) {
if level <= 1 {
return h1Tag, h1CloseTag
}
switch level {
case 2:
return h2Tag, h2CloseTag
case 3:
return h3Tag, h3CloseTag
case 4:
return h4Tag, h4CloseTag
case 5:
return h5Tag, h5CloseTag
}
return h6Tag, h6CloseTag
}
func (r *HTMLRenderer) outHRTag(w io.Writer) {
if r.Flags&UseXHTML == 0 {
r.out(w, hrTag)
} else {
r.out(w, hrXHTMLTag)
}
}
// RenderNode is a default renderer of a single node of a syntax tree. For
// block nodes it will be called twice: first time with entering=true, second
// time with entering=false, so that it could know when it's working on an open
// tag and when on close. It writes the result to w.
//
// The return value is a way to tell the calling walker to adjust its walk
// pattern: e.g. it can terminate the traversal by returning Terminate. Or it
// can ask the walker to skip a subtree of this node by returning SkipChildren.
// The typical behavior is to return GoToNext, which asks for the usual
// traversal to the next node.
func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus {
attrs := []string{}
switch node.Type {
case Text:
if r.Flags&Smartypants != 0 {
var tmp bytes.Buffer
escapeHTML(&tmp, node.Literal)
r.sr.Process(w, tmp.Bytes())
} else {
if node.Parent.Type == Link {
escLink(w, node.Literal)
} else {
escapeHTML(w, node.Literal)
}
}
case Softbreak:
r.cr(w)
// TODO: make it configurable via out(renderer.softbreak)
case Hardbreak:
if r.Flags&UseXHTML == 0 {
r.out(w, brTag)
} else {
r.out(w, brXHTMLTag)
}
r.cr(w)
case Emph:
if entering {
r.out(w, emTag)
} else {
r.out(w, emCloseTag)
}
case Strong:
if entering {
r.out(w, strongTag)
} else {
r.out(w, strongCloseTag)
}
case Del:
if entering {
r.out(w, delTag)
} else {
r.out(w, delCloseTag)
}
case HTMLSpan:
if r.Flags&SkipHTML != 0 {
break
}
r.out(w, node.Literal)
case Link:
// mark it but don't link it if it is not a safe link: no smartypants
dest := node.LinkData.Destination
if needSkipLink(r.Flags, dest) {
if entering {
r.out(w, ttTag)
} else {
r.out(w, ttCloseTag)
}
} else {
if entering {
dest = r.addAbsPrefix(dest)
var hrefBuf bytes.Buffer
hrefBuf.WriteString("href=\"")
escLink(&hrefBuf, dest)
hrefBuf.WriteByte('"')
attrs = append(attrs, hrefBuf.String())
if node.NoteID != 0 {
r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node))
break
}
attrs = appendLinkAttrs(attrs, r.Flags, dest)
if len(node.LinkData.Title) > 0 {
var titleBuff bytes.Buffer
titleBuff.WriteString("title=\"")
escapeHTML(&titleBuff, node.LinkData.Title)
titleBuff.WriteByte('"')
attrs = append(attrs, titleBuff.String())
}
r.tag(w, aTag, attrs)
} else {
if node.NoteID != 0 {
break
}
r.out(w, aCloseTag)
}
}
case Image:
if r.Flags&SkipImages != 0 {
return SkipChildren
}
if entering {
dest := node.LinkData.Destination
dest = r.addAbsPrefix(dest)
if r.disableTags == 0 {
//if options.safe && potentiallyUnsafe(dest) {
//out(w, `<img src="" alt="`)
//} else {
r.out(w, []byte(`<img src="`))
escLink(w, dest)
r.out(w, []byte(`" alt="`))
//}
}
r.disableTags++
} else {
r.disableTags--
if r.disableTags == 0 {
if node.LinkData.Title != nil {
r.out(w, []byte(`" title="`))
escapeHTML(w, node.LinkData.Title)
}
r.out(w, []byte(`" />`))
}
}
case Code:
r.out(w, codeTag)
escapeAllHTML(w, node.Literal)
r.out(w, codeCloseTag)
case Document:
break
case Paragraph:
if skipParagraphTags(node) {
break
}
if entering {
			// TODO: untangle the rules for when newlines need to be
			// added and when not.
if node.Prev != nil {
switch node.Prev.Type {
case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
r.cr(w)
}
}
if node.Parent.Type == BlockQuote && node.Prev == nil {
r.cr(w)
}
r.out(w, pTag)
} else {
r.out(w, pCloseTag)
if !(node.Parent.Type == Item && node.Next == nil) {
r.cr(w)
}
}
case BlockQuote:
if entering {
r.cr(w)
r.out(w, blockquoteTag)
} else {
r.out(w, blockquoteCloseTag)
r.cr(w)
}
case HTMLBlock:
if r.Flags&SkipHTML != 0 {
break
}
r.cr(w)
r.out(w, node.Literal)
r.cr(w)
case Heading:
headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
openTag, closeTag := headingTagsFromLevel(headingLevel)
if entering {
if node.IsTitleblock {
attrs = append(attrs, `class="title"`)
}
if node.HeadingID != "" {
id := r.ensureUniqueHeadingID(node.HeadingID)
if r.HeadingIDPrefix != "" {
id = r.HeadingIDPrefix + id
}
if r.HeadingIDSuffix != "" {
id = id + r.HeadingIDSuffix
}
attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
}
r.cr(w)
r.tag(w, openTag, attrs)
} else {
r.out(w, closeTag)
if !(node.Parent.Type == Item && node.Next == nil) {
r.cr(w)
}
}
case HorizontalRule:
r.cr(w)
r.outHRTag(w)
r.cr(w)
case List:
openTag := ulTag
closeTag := ulCloseTag
if node.ListFlags&ListTypeOrdered != 0 {
openTag = olTag
closeTag = olCloseTag
}
if node.ListFlags&ListTypeDefinition != 0 {
openTag = dlTag
closeTag = dlCloseTag
}
if entering {
if node.IsFootnotesList {
r.out(w, footnotesDivBytes)
r.outHRTag(w)
r.cr(w)
}
r.cr(w)
if node.Parent.Type == Item && node.Parent.Parent.Tight {
r.cr(w)
}
r.tag(w, openTag[:len(openTag)-1], attrs)
r.cr(w)
} else {
r.out(w, closeTag)
//cr(w)
//if node.parent.Type != Item {
// cr(w)
//}
if node.Parent.Type == Item && node.Next != nil {
r.cr(w)
}
if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
r.cr(w)
}
if node.IsFootnotesList {
r.out(w, footnotesCloseDivBytes)
}
}
case Item:
openTag := liTag
closeTag := liCloseTag
if node.ListFlags&ListTypeDefinition != 0 {
openTag = ddTag
closeTag = ddCloseTag
}
if node.ListFlags&ListTypeTerm != 0 {
openTag = dtTag
closeTag = dtCloseTag
}
if entering {
if itemOpenCR(node) {
r.cr(w)
}
if node.ListData.RefLink != nil {
slug := slugify(node.ListData.RefLink)
r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
break
}
r.out(w, openTag)
} else {
if node.ListData.RefLink != nil {
slug := slugify(node.ListData.RefLink)
if r.Flags&FootnoteReturnLinks != 0 {
r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
}
}
r.out(w, closeTag)
r.cr(w)
}
case CodeBlock:
attrs = appendLanguageAttr(attrs, node.Info)
r.cr(w)
r.out(w, preTag)
r.tag(w, codeTag[:len(codeTag)-1], attrs)
escapeAllHTML(w, node.Literal)
r.out(w, codeCloseTag)
r.out(w, preCloseTag)
if node.Parent.Type != Item {
r.cr(w)
}
case Table:
if entering {
r.cr(w)
r.out(w, tableTag)
} else {
r.out(w, tableCloseTag)
r.cr(w)
}
case TableCell:
openTag := tdTag
closeTag := tdCloseTag
if node.IsHeader {
openTag = thTag
closeTag = thCloseTag
}
if entering {
align := cellAlignment(node.Align)
if align != "" {
attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
}
if node.Prev == nil {
r.cr(w)
}
r.tag(w, openTag, attrs)
} else {
r.out(w, closeTag)
r.cr(w)
}
case TableHead:
if entering {
r.cr(w)
r.out(w, theadTag)
} else {
r.out(w, theadCloseTag)
r.cr(w)
}
case TableBody:
if entering {
r.cr(w)
r.out(w, tbodyTag)
// XXX: this is to adhere to a rather silly test. Should fix test.
if node.FirstChild == nil {
r.cr(w)
}
} else {
r.out(w, tbodyCloseTag)
r.cr(w)
}
case TableRow:
if entering {
r.cr(w)
r.out(w, trTag)
} else {
r.out(w, trCloseTag)
r.cr(w)
}
default:
panic("Unknown node type " + node.Type.String())
}
return GoToNext
}
// RenderHeader writes HTML document preamble and TOC if requested.
func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
r.writeDocumentHeader(w)
if r.Flags&TOC != 0 {
r.writeTOC(w, ast)
}
}
// RenderFooter writes HTML document footer.
func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
if r.Flags&CompletePage == 0 {
return
}
io.WriteString(w, "\n</body>\n</html>\n")
}
func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
if r.Flags&CompletePage == 0 {
return
}
ending := ""
if r.Flags&UseXHTML != 0 {
io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
ending = " /"
} else {
io.WriteString(w, "<!DOCTYPE html>\n")
io.WriteString(w, "<html>\n")
}
io.WriteString(w, "<head>\n")
io.WriteString(w, " <title>")
if r.Flags&Smartypants != 0 {
r.sr.Process(w, []byte(r.Title))
} else {
escapeHTML(w, []byte(r.Title))
}
io.WriteString(w, "</title>\n")
io.WriteString(w, " <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
io.WriteString(w, Version)
io.WriteString(w, "\"")
io.WriteString(w, ending)
io.WriteString(w, ">\n")
io.WriteString(w, " <meta charset=\"utf-8\"")
io.WriteString(w, ending)
io.WriteString(w, ">\n")
if r.CSS != "" {
io.WriteString(w, " <link rel=\"stylesheet\" type=\"text/css\" href=\"")
escapeHTML(w, []byte(r.CSS))
io.WriteString(w, "\"")
io.WriteString(w, ending)
io.WriteString(w, ">\n")
}
if r.Icon != "" {
io.WriteString(w, " <link rel=\"icon\" type=\"image/x-icon\" href=\"")
escapeHTML(w, []byte(r.Icon))
io.WriteString(w, "\"")
io.WriteString(w, ending)
io.WriteString(w, ">\n")
}
io.WriteString(w, "</head>\n")
io.WriteString(w, "<body>\n\n")
}
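// writeTOC walks the document's headings and writes a nested <ul> table of
// contents, assigning each heading an anchor ID of the form "toc_N".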
func (r *HTMLRenderer) writeTOC(w io.Writer, ast *Node) {
buf := bytes.Buffer{}
inHeading := false
tocLevel := 0
headingCount := 0
ast.Walk(func(node *Node, entering bool) WalkStatus {
if node.Type == Heading && !node.HeadingData.IsTitleblock {
inHeading = entering
if entering {
node.HeadingID = fmt.Sprintf("toc_%d", headingCount)
if node.Level == tocLevel {
buf.WriteString("</li>\n\n<li>")
} else if node.Level < tocLevel {
for node.Level < tocLevel {
tocLevel--
buf.WriteString("</li>\n</ul>")
}
buf.WriteString("</li>\n\n<li>")
} else {
for node.Level > tocLevel {
tocLevel++
buf.WriteString("\n<ul>\n<li>")
}
}
fmt.Fprintf(&buf, `<a href="#toc_%d">`, headingCount)
headingCount++
} else {
buf.WriteString("</a>")
}
return GoToNext
}
if inHeading {
return r.RenderNode(&buf, node, entering)
}
return GoToNext
})
for ; tocLevel > 0; tocLevel-- {
buf.WriteString("</li>\n</ul>")
}
if buf.Len() > 0 {
io.WriteString(w, "<nav>\n")
w.Write(buf.Bytes())
io.WriteString(w, "\n\n</nav>\n")
}
r.lastOutputLen = buf.Len()
}

vendor/github.com/russross/blackfriday/v2/inline.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/russross/blackfriday/v2/markdown.go generated vendored Normal file

@ -0,0 +1,950 @@
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
package blackfriday
import (
"bytes"
"fmt"
"io"
"strings"
"unicode/utf8"
)
//
// Markdown parsing and processing
//
// Version string of the package. Appears in the rendered document when
// CompletePage flag is on.
const Version = "2.0"
// Extensions is a bitwise or'ed collection of enabled Blackfriday's
// extensions.
type Extensions int
// These are the supported markdown parsing extensions.
// OR these values together to select multiple extensions.
const (
NoExtensions Extensions = 0
NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words
Tables // Render tables
FencedCode // Render fenced code blocks
Autolink // Detect embedded URLs that are not explicitly marked
Strikethrough // Strikethrough text using ~~test~~
LaxHTMLBlocks // Loosen up HTML block parsing rules
SpaceHeadings // Be strict about prefix heading rules
HardLineBreak // Translate newlines into line breaks
TabSizeEight // Expand tabs to eight spaces instead of four
Footnotes // Pandoc-style footnotes
NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block
HeadingIDs // specify heading IDs with {#id}
Titleblock // Titleblock ala pandoc
AutoHeadingIDs // Create the heading ID from the text
BackslashLineBreak // Translate trailing backslashes into line breaks
DefinitionLists // Render definition lists
CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants |
SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes
CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode |
Autolink | Strikethrough | SpaceHeadings | HeadingIDs |
BackslashLineBreak | DefinitionLists
)
// ListType contains bitwise or'ed flags for list and list item objects.
type ListType int
// These are the possible flag values for the ListItem renderer.
// Multiple flag values may be ORed together.
// These are mostly of interest if you are writing a new output format.
const (
ListTypeOrdered ListType = 1 << iota
ListTypeDefinition
ListTypeTerm
ListItemContainsBlock
ListItemBeginningOfList // TODO: figure out if this is of any use now
ListItemEndOfList
)
// CellAlignFlags holds a type of alignment in a table cell.
type CellAlignFlags int
// These are the possible flag values for the table cell renderer.
// Only a single one of these values will be used; they are not ORed together.
// These are mostly of interest if you are writing a new output format.
const (
TableAlignmentLeft CellAlignFlags = 1 << iota
TableAlignmentRight
TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight)
)
// The size of a tab stop.
const (
TabSizeDefault = 4
TabSizeDouble = 8
)
// blockTags is a set of tags that are recognized as HTML block tags.
// Any of these can be included in markdown text without special escaping.
var blockTags = map[string]struct{}{
"blockquote": {},
"del": {},
"div": {},
"dl": {},
"fieldset": {},
"form": {},
"h1": {},
"h2": {},
"h3": {},
"h4": {},
"h5": {},
"h6": {},
"iframe": {},
"ins": {},
"math": {},
"noscript": {},
"ol": {},
"pre": {},
"p": {},
"script": {},
"style": {},
"table": {},
"ul": {},
// HTML5
"address": {},
"article": {},
"aside": {},
"canvas": {},
"figcaption": {},
"figure": {},
"footer": {},
"header": {},
"hgroup": {},
"main": {},
"nav": {},
"output": {},
"progress": {},
"section": {},
"video": {},
}
// Renderer is the rendering interface. This is mostly of interest if you are
// implementing a new rendering format.
//
// Only an HTML implementation is provided in this repository, see the README
// for external implementations.
type Renderer interface {
// RenderNode is the main rendering method. It will be called once for
// every leaf node and twice for every non-leaf node (first with
// entering=true, then with entering=false). The method should write its
// rendition of the node to the supplied writer w.
RenderNode(w io.Writer, node *Node, entering bool) WalkStatus
// RenderHeader is a method that allows the renderer to produce some
// content preceding the main body of the output document. The header is
// understood in the broad sense here. For example, the default HTML
// renderer will write not only the HTML document preamble, but also the
// table of contents if it was requested.
//
// The method will be passed an entire document tree, in case a particular
// implementation needs to inspect it to produce output.
//
// The output should be written to the supplied writer w. If your
// implementation has no header to write, supply an empty implementation.
RenderHeader(w io.Writer, ast *Node)
// RenderFooter is a symmetric counterpart of RenderHeader.
RenderFooter(w io.Writer, ast *Node)
}
// Callback functions for inline parsing. One such function is defined
// for each character that triggers a response when parsing inline data.
type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node)
// Markdown is a type that holds extensions and the runtime state used by
// Parse, and the renderer. You cannot use it directly; construct it with New.
type Markdown struct {
renderer Renderer
referenceOverride ReferenceOverrideFunc
refs map[string]*reference
inlineCallback [256]inlineParser
extensions Extensions
nesting int
maxNesting int
insideLink bool
// Footnotes need to be ordered as well as available to quickly check for
// presence. If a ref is also a footnote, it's stored both in refs and here
// in notes. Slice is nil if footnotes not enabled.
notes []*reference
doc *Node
tip *Node // = doc
oldTip *Node
lastMatchedContainer *Node // = doc
allClosed bool
}
func (p *Markdown) getRef(refid string) (ref *reference, found bool) {
if p.referenceOverride != nil {
r, overridden := p.referenceOverride(refid)
if overridden {
if r == nil {
return nil, false
}
return &reference{
link: []byte(r.Link),
title: []byte(r.Title),
noteID: 0,
hasBlock: false,
text: []byte(r.Text)}, true
}
}
// refs are case insensitive
ref, found = p.refs[strings.ToLower(refid)]
return ref, found
}
func (p *Markdown) finalize(block *Node) {
above := block.Parent
block.open = false
p.tip = above
}
func (p *Markdown) addChild(node NodeType, offset uint32) *Node {
return p.addExistingChild(NewNode(node), offset)
}
func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node {
for !p.tip.canContain(node.Type) {
p.finalize(p.tip)
}
p.tip.AppendChild(node)
p.tip = node
return node
}
func (p *Markdown) closeUnmatchedBlocks() {
if !p.allClosed {
for p.oldTip != p.lastMatchedContainer {
parent := p.oldTip.Parent
p.finalize(p.oldTip)
p.oldTip = parent
}
p.allClosed = true
}
}
//
//
// Public interface
//
//
// Reference represents the details of a link.
// See the documentation in Options for more details on use-case.
type Reference struct {
// Link is usually the URL the reference points to.
Link string
// Title is the alternate text describing the link in more detail.
Title string
// Text is the optional text to override the ref with if the syntax used was
// [refid][]
Text string
}
// ReferenceOverrideFunc is expected to be called with a reference string and
// return either a valid Reference type that the reference string maps to or
// nil. If overridden is false, the default reference logic will be executed.
// See the documentation in Options for more details on use-case.
type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool)
// New constructs a Markdown processor. You can use the same With* functions as
// for Run() to customize parser's behavior and the renderer.
func New(opts ...Option) *Markdown {
var p Markdown
for _, opt := range opts {
opt(&p)
}
p.refs = make(map[string]*reference)
p.maxNesting = 16
p.insideLink = false
docNode := NewNode(Document)
p.doc = docNode
p.tip = docNode
p.oldTip = docNode
p.lastMatchedContainer = docNode
p.allClosed = true
// register inline parsers
p.inlineCallback[' '] = maybeLineBreak
p.inlineCallback['*'] = emphasis
p.inlineCallback['_'] = emphasis
if p.extensions&Strikethrough != 0 {
p.inlineCallback['~'] = emphasis
}
p.inlineCallback['`'] = codeSpan
p.inlineCallback['\n'] = lineBreak
p.inlineCallback['['] = link
p.inlineCallback['<'] = leftAngle
p.inlineCallback['\\'] = escape
p.inlineCallback['&'] = entity
p.inlineCallback['!'] = maybeImage
p.inlineCallback['^'] = maybeInlineFootnote
if p.extensions&Autolink != 0 {
p.inlineCallback['h'] = maybeAutoLink
p.inlineCallback['m'] = maybeAutoLink
p.inlineCallback['f'] = maybeAutoLink
p.inlineCallback['H'] = maybeAutoLink
p.inlineCallback['M'] = maybeAutoLink
p.inlineCallback['F'] = maybeAutoLink
}
if p.extensions&Footnotes != 0 {
p.notes = make([]*reference, 0)
}
return &p
}
// Option customizes the Markdown processor's default behavior.
type Option func(*Markdown)
// WithRenderer allows you to override the default renderer.
func WithRenderer(r Renderer) Option {
return func(p *Markdown) {
p.renderer = r
}
}
// WithExtensions allows you to pick some of the many extensions provided by
// Blackfriday. You can bitwise OR them.
func WithExtensions(e Extensions) Option {
return func(p *Markdown) {
p.extensions = e
}
}
// WithNoExtensions turns off all extensions and custom behavior.
func WithNoExtensions() Option {
return func(p *Markdown) {
p.extensions = NoExtensions
p.renderer = NewHTMLRenderer(HTMLRendererParameters{
Flags: HTMLFlagsNone,
})
}
}
// WithRefOverride sets an optional function callback that is called every
// time a reference is resolved.
//
// In Markdown, the link reference syntax can be made to resolve a link to
// a reference instead of an inline URL, in one of the following ways:
//
// * [link text][refid]
// * [refid][]
//
// Usually, the refid is defined at the bottom of the Markdown document. If
// this override function is provided, the refid is passed to the override
// function first, before consulting the defined refids at the bottom. If
// the override function indicates an override did not occur, the refids at
// the bottom will be used to fill in the link details.
func WithRefOverride(o ReferenceOverrideFunc) Option {
return func(p *Markdown) {
p.referenceOverride = o
}
}
// Run is the main entry point to Blackfriday. It parses and renders a
// block of markdown-encoded text.
//
// The simplest invocation of Run takes one argument, input:
// output := Run(input)
// This will parse the input with CommonExtensions enabled and render it with
// the default HTMLRenderer (with CommonHTMLFlags).
//
// Variadic arguments opts can customize the default behavior. Since the
// Markdown type does not contain exported fields, you cannot use it directly. Instead,
// use the With* functions. For example, this will call the most basic
// functionality, with no extensions:
// output := Run(input, WithNoExtensions())
//
// You can use any number of With* arguments, even contradicting ones. They
// will be applied in order of appearance and the latter will override the
// former:
// output := Run(input, WithNoExtensions(), WithExtensions(exts),
// WithRenderer(yourRenderer))
func Run(input []byte, opts ...Option) []byte {
r := NewHTMLRenderer(HTMLRendererParameters{
Flags: CommonHTMLFlags,
})
optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)}
optList = append(optList, opts...)
parser := New(optList...)
ast := parser.Parse(input)
var buf bytes.Buffer
parser.renderer.RenderHeader(&buf, ast)
ast.Walk(func(node *Node, entering bool) WalkStatus {
return parser.renderer.RenderNode(&buf, node, entering)
})
parser.renderer.RenderFooter(&buf, ast)
return buf.Bytes()
}
// Parse is an entry point to the parsing part of Blackfriday. It takes an
// input markdown document and produces a syntax tree for its contents. This
// tree can then be rendered with a default or custom renderer, or
// analyzed/transformed by the caller to whatever non-standard needs they have.
// The return value is the root node of the syntax tree.
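//
// For example, a typical caller might do (an illustrative sketch, not part
// of the original documentation):
//
//	p := New(WithExtensions(CommonExtensions))
//	ast := p.Parse(input)
//	ast.Walk(func(node *Node, entering bool) WalkStatus {
//		// inspect or transform nodes here
//		return GoToNext
//	})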
func (p *Markdown) Parse(input []byte) *Node {
p.block(input)
// Walk the tree and finish up some of unfinished blocks
for p.tip != nil {
p.finalize(p.tip)
}
// Walk the tree again and process inline markdown in each block
p.doc.Walk(func(node *Node, entering bool) WalkStatus {
if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell {
p.inline(node, node.content)
node.content = nil
}
return GoToNext
})
p.parseRefsToAST()
return p.doc
}
func (p *Markdown) parseRefsToAST() {
if p.extensions&Footnotes == 0 || len(p.notes) == 0 {
return
}
p.tip = p.doc
block := p.addBlock(List, nil)
block.IsFootnotesList = true
block.ListFlags = ListTypeOrdered
flags := ListItemBeginningOfList
// Note: this loop is intentionally explicit, not range-form. This is
// because the body of the loop will append nested footnotes to p.notes and
// we need to process those late additions. Range form would only walk over
// the fixed initial set.
for i := 0; i < len(p.notes); i++ {
ref := p.notes[i]
p.addExistingChild(ref.footnote, 0)
block := ref.footnote
block.ListFlags = flags | ListTypeOrdered
block.RefLink = ref.link
if ref.hasBlock {
flags |= ListItemContainsBlock
p.block(ref.title)
} else {
p.inline(block, ref.title)
}
flags &^= ListItemBeginningOfList | ListItemContainsBlock
}
above := block.Parent
finalizeList(block)
p.tip = above
block.Walk(func(node *Node, entering bool) WalkStatus {
if node.Type == Paragraph || node.Type == Heading {
p.inline(node, node.content)
node.content = nil
}
return GoToNext
})
}
//
// Link references
//
// This section implements support for references that (usually) appear
// as footnotes in a document, and can be referenced anywhere in the document.
// The basic format is:
//
// [1]: http://www.google.com/ "Google"
// [2]: http://www.github.com/ "Github"
//
// Anywhere in the document, the reference can be linked by referring to its
// label, i.e., 1 and 2 in this example, as in:
//
// This library is hosted on [Github][2], a git hosting site.
//
// Actual footnotes as specified in Pandoc and supported by some other Markdown
// libraries such as php-markdown are also taken care of. They look like this:
//
// This sentence needs a bit of further explanation.[^note]
//
// [^note]: This is the explanation.
//
// Footnotes should be placed at the end of the document in an ordered list.
// Finally, there are inline footnotes such as:
//
// Inline footnotes^[Also supported.] provide a quick inline explanation,
// but are rendered at the bottom of the document.
//
// reference holds all information necessary for a reference-style links or
// footnotes.
//
// Consider this markdown with reference-style links:
//
// [link][ref]
//
// [ref]: /url/ "tooltip title"
//
// It will be ultimately converted to this HTML:
//
// <p><a href=\"/url/\" title=\"title\">link</a></p>
//
// And a reference structure will be populated as follows:
//
// p.refs["ref"] = &reference{
// link: "/url/",
// title: "tooltip title",
// }
//
// Alternatively, reference can contain information about a footnote. Consider
// this markdown:
//
// Text needing a footnote.[^a]
//
// [^a]: This is the note
//
// A reference structure will be populated as follows:
//
// p.refs["a"] = &reference{
// link: "a",
// title: "This is the note",
// noteID: <some positive int>,
// }
//
// TODO: As you can see, it begs for splitting into two dedicated structures
// for refs and for footnotes.
type reference struct {
link []byte
title []byte
noteID int // 0 if not a footnote ref
hasBlock bool
footnote *Node // a link to the Item node within a list of footnotes
text []byte // only gets populated by refOverride feature with Reference.Text
}
func (r *reference) String() string {
return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}",
r.link, r.title, r.text, r.noteID, r.hasBlock)
}
// Check whether or not data starts with a reference link.
// If so, it is parsed and stored in the list of references
// (in the render struct).
// Returns the number of bytes to skip to move past it,
// or zero if the first line is not a reference.
func isReference(p *Markdown, data []byte, tabSize int) int {
// up to 3 optional leading spaces
if len(data) < 4 {
return 0
}
i := 0
for i < 3 && data[i] == ' ' {
i++
}
noteID := 0
// id part: anything but a newline between brackets
if data[i] != '[' {
return 0
}
i++
if p.extensions&Footnotes != 0 {
if i < len(data) && data[i] == '^' {
// we can set it to anything here because the proper noteIds will
// be assigned later during the second pass. It just has to be != 0
noteID = 1
i++
}
}
idOffset := i
for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' {
i++
}
if i >= len(data) || data[i] != ']' {
return 0
}
idEnd := i
	// footnotes can have empty IDs, like this: [^], but a reference cannot be
	// empty like this: []. Break early if it's not a footnote and there's no ID
if noteID == 0 && idOffset == idEnd {
return 0
}
// spacer: colon (space | tab)* newline? (space | tab)*
i++
if i >= len(data) || data[i] != ':' {
return 0
}
i++
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && (data[i] == '\n' || data[i] == '\r') {
i++
if i < len(data) && data[i] == '\n' && data[i-1] == '\r' {
i++
}
}
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i >= len(data) {
return 0
}
var (
linkOffset, linkEnd int
titleOffset, titleEnd int
lineEnd int
raw []byte
hasBlock bool
)
if p.extensions&Footnotes != 0 && noteID != 0 {
linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize)
lineEnd = linkEnd
} else {
linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i)
}
if lineEnd == 0 {
return 0
}
// a valid ref has been found
ref := &reference{
noteID: noteID,
hasBlock: hasBlock,
}
if noteID > 0 {
// reusing the link field for the id since footnotes don't have links
ref.link = data[idOffset:idEnd]
// if footnote, it's not really a title, it's the contained text
ref.title = raw
} else {
ref.link = data[linkOffset:linkEnd]
ref.title = data[titleOffset:titleEnd]
}
// id matches are case-insensitive
id := string(bytes.ToLower(data[idOffset:idEnd]))
p.refs[id] = ref
return lineEnd
}
func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) {
// link: whitespace-free sequence, optionally between angle brackets
if data[i] == '<' {
i++
}
linkOffset = i
for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' {
i++
}
linkEnd = i
if data[linkOffset] == '<' && data[linkEnd-1] == '>' {
linkOffset++
linkEnd--
}
// optional spacer: (space | tab)* (newline | '\'' | '"' | '(' )
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' {
return
}
// compute end-of-line
if i >= len(data) || data[i] == '\r' || data[i] == '\n' {
lineEnd = i
}
if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' {
lineEnd++
}
// optional (space|tab)* spacer after a newline
if lineEnd > 0 {
i = lineEnd + 1
for i < len(data) && (data[i] == ' ' || data[i] == '\t') {
i++
}
}
	// optional title: any non-newline sequence enclosed in single quotes, double quotes, or parentheses, alone on its line
if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') {
i++
titleOffset = i
// look for EOL
for i < len(data) && data[i] != '\n' && data[i] != '\r' {
i++
}
if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' {
titleEnd = i + 1
} else {
titleEnd = i
}
// step back
i--
for i > titleOffset && (data[i] == ' ' || data[i] == '\t') {
i--
}
if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') {
lineEnd = titleEnd
titleEnd = i
}
}
return
}
// The first bit of this logic is the same as Parser.listItem, but the rest
// is much simpler. This function simply finds the entire block and shifts it
// over by one tab if it is indeed a block (just returns the line if it's not).
// blockEnd is the end of the section in the input buffer, and contents is the
// extracted text that was shifted over one tab. It will need to be rendered at
// the end of the document.
func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) {
if i == 0 || len(data) == 0 {
return
}
// skip leading whitespace on first line
for i < len(data) && data[i] == ' ' {
i++
}
blockStart = i
// find the end of the line
blockEnd = i
for i < len(data) && data[i-1] != '\n' {
i++
}
// get working buffer
var raw bytes.Buffer
// put the first line into the working buffer
raw.Write(data[blockEnd:i])
blockEnd = i
// process the following lines
containsBlankLine := false
gatherLines:
for blockEnd < len(data) {
i++
// find the end of this line
for i < len(data) && data[i-1] != '\n' {
i++
}
// if it is an empty line, guess that it is part of this item
// and move on to the next line
if p.isEmpty(data[blockEnd:i]) > 0 {
containsBlankLine = true
blockEnd = i
continue
}
n := 0
if n = isIndented(data[blockEnd:i], indentSize); n == 0 {
// this is the end of the block.
// we don't want to include this last line in the index.
break gatherLines
}
// if there were blank lines before this one, insert a new one now
if containsBlankLine {
raw.WriteByte('\n')
containsBlankLine = false
}
// get rid of that first tab, write to buffer
raw.Write(data[blockEnd+n : i])
hasBlock = true
blockEnd = i
}
if data[blockEnd-1] != '\n' {
raw.WriteByte('\n')
}
contents = raw.Bytes()
return
}
//
//
// Miscellaneous helper functions
//
//
// Test if a character is a punctuation symbol.
// Taken from a private function in regexp in the stdlib.
func ispunct(c byte) bool {
for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") {
if c == r {
return true
}
}
return false
}
// Test if a character is a whitespace character.
func isspace(c byte) bool {
return ishorizontalspace(c) || isverticalspace(c)
}
// Test if a character is a horizontal whitespace character.
func ishorizontalspace(c byte) bool {
return c == ' ' || c == '\t'
}
// Test if a character is a vertical whitespace character.
func isverticalspace(c byte) bool {
return c == '\n' || c == '\r' || c == '\f' || c == '\v'
}
// Test if a character is a letter.
func isletter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}
// Test if a character is a letter or a digit.
// TODO: check when this is looking for ASCII alnum and when it should use unicode
func isalnum(c byte) bool {
return (c >= '0' && c <= '9') || isletter(c)
}
// Replace tab characters with spaces, aligning to the next TAB_SIZE column.
// always ends output with a newline
func expandTabs(out *bytes.Buffer, line []byte, tabSize int) {
// first, check for common cases: no tabs, or only tabs at beginning of line
i, prefix := 0, 0
slowcase := false
for i = 0; i < len(line); i++ {
if line[i] == '\t' {
if prefix == i {
prefix++
} else {
slowcase = true
break
}
}
}
// no need to decode runes if all tabs are at the beginning of the line
if !slowcase {
for i = 0; i < prefix*tabSize; i++ {
out.WriteByte(' ')
}
out.Write(line[prefix:])
return
}
// the slow case: we need to count runes to figure out how
// many spaces to insert for each tab
column := 0
i = 0
for i < len(line) {
start := i
for i < len(line) && line[i] != '\t' {
_, size := utf8.DecodeRune(line[i:])
i += size
column++
}
if i > start {
out.Write(line[start:i])
}
if i >= len(line) {
break
}
for {
out.WriteByte(' ')
column++
if column%tabSize == 0 {
break
}
}
i++
}
}
// Find if a line counts as indented or not.
// Returns number of characters the indent is (0 = not indented).
func isIndented(data []byte, indentSize int) int {
if len(data) == 0 {
return 0
}
if data[0] == '\t' {
return 1
}
if len(data) < indentSize {
return 0
}
for i := 0; i < indentSize; i++ {
if data[i] != ' ' {
return 0
}
}
return indentSize
}
// Create a url-safe slug for fragments
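// For example, slugify([]byte("Some Header!")) yields "some-header".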
func slugify(in []byte) []byte {
if len(in) == 0 {
return in
}
out := make([]byte, 0, len(in))
sym := false
for _, ch := range in {
if isalnum(ch) {
sym = false
out = append(out, ch)
} else if sym {
continue
} else {
out = append(out, '-')
sym = true
}
}
var a, b int
var ch byte
for a, ch = range out {
if ch != '-' {
break
}
}
for b = len(out) - 1; b > 0; b-- {
if out[b] != '-' {
break
}
}
return out[a : b+1]
}

vendor/github.com/russross/blackfriday/v2/node.go generated vendored Normal file

@ -0,0 +1,360 @@
package blackfriday
import (
"bytes"
"fmt"
)
// NodeType specifies a type of a single node of a syntax tree. Usually one
// node (and its type) corresponds to a single markdown feature, e.g. emphasis
// or code block.
type NodeType int
// Constants for identifying different types of nodes. See NodeType.
const (
Document NodeType = iota
BlockQuote
List
Item
Paragraph
Heading
HorizontalRule
Emph
Strong
Del
Link
Image
Text
HTMLBlock
CodeBlock
Softbreak
Hardbreak
Code
HTMLSpan
Table
TableCell
TableHead
TableBody
TableRow
)
var nodeTypeNames = []string{
Document: "Document",
BlockQuote: "BlockQuote",
List: "List",
Item: "Item",
Paragraph: "Paragraph",
Heading: "Heading",
HorizontalRule: "HorizontalRule",
Emph: "Emph",
Strong: "Strong",
Del: "Del",
Link: "Link",
Image: "Image",
Text: "Text",
HTMLBlock: "HTMLBlock",
CodeBlock: "CodeBlock",
Softbreak: "Softbreak",
Hardbreak: "Hardbreak",
Code: "Code",
HTMLSpan: "HTMLSpan",
Table: "Table",
TableCell: "TableCell",
TableHead: "TableHead",
TableBody: "TableBody",
TableRow: "TableRow",
}
func (t NodeType) String() string {
return nodeTypeNames[t]
}
// ListData contains fields relevant to a List and Item node type.
type ListData struct {
ListFlags ListType
Tight bool // Skip <p>s around list item data if true
BulletChar byte // '*', '+' or '-' in bullet lists
Delimiter byte // '.' or ')' after the number in ordered lists
RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering
IsFootnotesList bool // This is a list of footnotes
}
// LinkData contains fields relevant to a Link node type.
type LinkData struct {
Destination []byte // Destination is what goes into a href
Title []byte // Title is the tooltip thing that goes in a title attribute
NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote
Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil.
}
// CodeBlockData contains fields relevant to a CodeBlock node type.
type CodeBlockData struct {
IsFenced bool // Specifies whether it's a fenced code block or an indented one
Info []byte // This holds the info string
FenceChar byte
FenceLength int
FenceOffset int
}
// TableCellData contains fields relevant to a TableCell node type.
type TableCellData struct {
IsHeader bool // This tells if it's under the header row
Align CellAlignFlags // This holds the value for align attribute
}
// HeadingData contains fields relevant to a Heading node type.
type HeadingData struct {
Level int // This holds the heading level number
HeadingID string // This might hold heading ID, if present
IsTitleblock bool // Specifies whether it's a title block
}
// Node is a single element in the abstract syntax tree of the parsed document.
// It holds connections to the structurally neighboring nodes and, for certain
// types of nodes, additional information that might be needed when rendering.
type Node struct {
Type NodeType // Determines the type of the node
Parent *Node // Points to the parent
FirstChild *Node // Points to the first child, if any
LastChild *Node // Points to the last child, if any
Prev *Node // Previous sibling; nil if it's the first child
Next *Node // Next sibling; nil if it's the last child
Literal []byte // Text contents of the leaf nodes
HeadingData // Populated if Type is Heading
ListData // Populated if Type is List
CodeBlockData // Populated if Type is CodeBlock
LinkData // Populated if Type is Link
TableCellData // Populated if Type is TableCell
content []byte // Markdown content of the block nodes
open bool // Specifies an open block node that has not been finished to process yet
}
// NewNode allocates a node of a specified type.
func NewNode(typ NodeType) *Node {
return &Node{
Type: typ,
open: true,
}
}
func (n *Node) String() string {
ellipsis := ""
snippet := n.Literal
if len(snippet) > 16 {
snippet = snippet[:16]
ellipsis = "..."
}
return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis)
}
// Unlink removes node 'n' from the tree.
// It panics if the node is nil.
func (n *Node) Unlink() {
if n.Prev != nil {
n.Prev.Next = n.Next
} else if n.Parent != nil {
n.Parent.FirstChild = n.Next
}
if n.Next != nil {
n.Next.Prev = n.Prev
} else if n.Parent != nil {
n.Parent.LastChild = n.Prev
}
n.Parent = nil
n.Next = nil
n.Prev = nil
}
// AppendChild adds a node 'child' as a child of 'n'.
// It panics if either node is nil.
func (n *Node) AppendChild(child *Node) {
child.Unlink()
child.Parent = n
if n.LastChild != nil {
n.LastChild.Next = child
child.Prev = n.LastChild
n.LastChild = child
} else {
n.FirstChild = child
n.LastChild = child
}
}
// InsertBefore inserts 'sibling' immediately before 'n'.
// It panics if either node is nil.
func (n *Node) InsertBefore(sibling *Node) {
sibling.Unlink()
sibling.Prev = n.Prev
if sibling.Prev != nil {
sibling.Prev.Next = sibling
}
sibling.Next = n
n.Prev = sibling
sibling.Parent = n.Parent
if sibling.Prev == nil {
sibling.Parent.FirstChild = sibling
}
}
// IsContainer returns true if 'n' can contain children.
func (n *Node) IsContainer() bool {
	switch n.Type {
	case Document, BlockQuote, List, Item, Paragraph, Heading,
		Emph, Strong, Del, Link, Image,
		Table, TableHead, TableBody, TableRow, TableCell:
		return true
	default:
		return false
	}
}
// IsLeaf returns true if 'n' is a leaf node.
func (n *Node) IsLeaf() bool {
return !n.IsContainer()
}
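// Editor's illustration (not upstream code): IsLeaf makes it easy to count
// the leaf nodes of a subtree with Walk (defined below).
func exampleCountLeaves(root *Node) int {
	count := 0
	root.Walk(func(n *Node, entering bool) WalkStatus {
		if entering && n.IsLeaf() {
			count++
		}
		return GoToNext
	})
	return count
}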
func (n *Node) canContain(t NodeType) bool {
if n.Type == List {
return t == Item
}
if n.Type == Document || n.Type == BlockQuote || n.Type == Item {
return t != Item
}
if n.Type == Table {
return t == TableHead || t == TableBody
}
if n.Type == TableHead || n.Type == TableBody {
return t == TableRow
}
if n.Type == TableRow {
return t == TableCell
}
return false
}
// WalkStatus allows NodeVisitor to have some control over the tree traversal.
// It is returned from NodeVisitor and different values allow Node.Walk to
// decide which node to go to next.
type WalkStatus int
const (
// GoToNext is the default traversal of every node.
GoToNext WalkStatus = iota
// SkipChildren tells walker to skip all children of current node.
SkipChildren
// Terminate tells walker to terminate the traversal.
Terminate
)
// NodeVisitor is a callback to be called when traversing the syntax tree.
// Called twice for every node: once with entering=true when the branch is
// first visited, then with entering=false after all the children are done.
type NodeVisitor func(node *Node, entering bool) WalkStatus
// Walk is a convenience method that instantiates a walker and starts a
// traversal of the subtree rooted at 'n'.
func (n *Node) Walk(visitor NodeVisitor) {
w := newNodeWalker(n)
for w.current != nil {
status := visitor(w.current, w.entering)
switch status {
case GoToNext:
w.next()
case SkipChildren:
w.entering = false
w.next()
case Terminate:
return
}
}
}
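// Editor's illustration (not upstream code): a typical NodeVisitor that
// collects plain text while skipping everything inside tables. Returning
// SkipChildren on entry prevents the walker from descending into a subtree.
func exampleCollectText(root *Node) []byte {
	var buf bytes.Buffer
	root.Walk(func(n *Node, entering bool) WalkStatus {
		if !entering {
			return GoToNext
		}
		switch n.Type {
		case Table:
			return SkipChildren // ignore table contents entirely
		case Text, Code:
			buf.Write(n.Literal)
		}
		return GoToNext
	})
	return buf.Bytes()
}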
type nodeWalker struct {
current *Node
root *Node
entering bool
}
func newNodeWalker(root *Node) *nodeWalker {
return &nodeWalker{
current: root,
root: root,
entering: true,
}
}
func (nw *nodeWalker) next() {
if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root {
nw.current = nil
return
}
if nw.entering && nw.current.IsContainer() {
if nw.current.FirstChild != nil {
nw.current = nw.current.FirstChild
nw.entering = true
} else {
nw.entering = false
}
} else if nw.current.Next == nil {
nw.current = nw.current.Parent
nw.entering = false
} else {
nw.current = nw.current.Next
nw.entering = true
}
}
func dump(ast *Node) {
fmt.Println(dumpString(ast))
}
func dumpR(ast *Node, depth int) string {
if ast == nil {
return ""
}
indent := bytes.Repeat([]byte("\t"), depth)
content := ast.Literal
if content == nil {
content = ast.content
}
result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content)
for n := ast.FirstChild; n != nil; n = n.Next {
result += dumpR(n, depth+1)
}
return result
}
func dumpString(ast *Node) string {
return dumpR(ast, 0)
}
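// Editor's sketch (not upstream code): dump and dumpString are unexported
// debugging helpers that print one line per node, indented by depth.
func exampleDump() {
	doc := NewNode(Document)
	text := NewNode(Text)
	text.Literal = []byte("hello")
	doc.AppendChild(text)
	dump(doc) // prints: Document("") and, indented one tab, Text("hello")
}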


@ -0,0 +1,457 @@
//
// Blackfriday Markdown Processor
// Available at http://github.com/russross/blackfriday
//
// Copyright © 2011 Russ Ross <russ@russross.com>.
// Distributed under the Simplified BSD License.
// See README.md for details.
//
//
//
// SmartyPants rendering
//
//
package blackfriday
import (
"bytes"
"io"
)
// SPRenderer is a struct containing state of a Smartypants renderer.
type SPRenderer struct {
inSingleQuote bool
inDoubleQuote bool
callbacks [256]smartCallback
}
func wordBoundary(c byte) bool {
return c == 0 || isspace(c) || ispunct(c)
}
func tolower(c byte) byte {
if c >= 'A' && c <= 'Z' {
return c - 'A' + 'a'
}
return c
}
func isdigit(c byte) bool {
return c >= '0' && c <= '9'
}
func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool {
// edge of the buffer is likely to be a tag that we don't get to see,
// so we treat it like text sometimes
// enumerate all sixteen possibilities for (previousChar, nextChar)
// each can be one of {0, space, punct, other}
switch {
case previousChar == 0 && nextChar == 0:
// context is not any help here, so toggle
*isOpen = !*isOpen
case isspace(previousChar) && nextChar == 0:
// [ "] might be [ "<code>foo...]
*isOpen = true
case ispunct(previousChar) && nextChar == 0:
// [!"] hmm... could be [Run!"] or [("<code>...]
*isOpen = false
case /* isnormal(previousChar) && */ nextChar == 0:
// [a"] is probably a close
*isOpen = false
case previousChar == 0 && isspace(nextChar):
// [" ] might be [...foo</code>" ]
*isOpen = false
case isspace(previousChar) && isspace(nextChar):
// [ " ] context is not any help here, so toggle
*isOpen = !*isOpen
case ispunct(previousChar) && isspace(nextChar):
// [!" ] is probably a close
*isOpen = false
case /* isnormal(previousChar) && */ isspace(nextChar):
// [a" ] this is one of the easy cases
*isOpen = false
case previousChar == 0 && ispunct(nextChar):
// ["!] hmm... could be ["$1.95] or [</code>"!...]
*isOpen = false
case isspace(previousChar) && ispunct(nextChar):
// [ "!] looks more like [ "$1.95]
*isOpen = true
case ispunct(previousChar) && ispunct(nextChar):
// [!"!] context is not any help here, so toggle
*isOpen = !*isOpen
case /* isnormal(previousChar) && */ ispunct(nextChar):
// [a"!] is probably a close
*isOpen = false
case previousChar == 0 /* && isnormal(nextChar) */ :
// ["a] is probably an open
*isOpen = true
case isspace(previousChar) /* && isnormal(nextChar) */ :
// [ "a] this is one of the easy cases
*isOpen = true
case ispunct(previousChar) /* && isnormal(nextChar) */ :
// [!"a] is probably an open
*isOpen = true
default:
// [a'b] maybe a contraction?
*isOpen = false
}
// Note that with the limited lookahead, this non-breaking
// space will also be appended to single double quotes.
if addNBSP && !*isOpen {
out.WriteString("&nbsp;")
}
out.WriteByte('&')
if *isOpen {
out.WriteByte('l')
} else {
out.WriteByte('r')
}
out.WriteByte(quote)
out.WriteString("quo;")
if addNBSP && *isOpen {
out.WriteString("&nbsp;")
}
return true
}
func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 {
t1 := tolower(text[1])
if t1 == '\'' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
return 1
}
}
if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
out.WriteString("&rsquo;")
return 0
}
if len(text) >= 3 {
t2 := tolower(text[2])
if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
(len(text) < 4 || wordBoundary(text[3])) {
out.WriteString("&rsquo;")
return 0
}
}
}
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
return 0
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 {
t1 := tolower(text[1])
t2 := tolower(text[2])
if t1 == 'c' && t2 == ')' {
out.WriteString("&copy;")
return 2
}
if t1 == 'r' && t2 == ')' {
out.WriteString("&reg;")
return 2
}
if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
out.WriteString("&trade;")
return 3
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 {
if text[1] == '-' {
out.WriteString("&mdash;")
return 1
}
if wordBoundary(previousChar) && wordBoundary(text[1]) {
out.WriteString("&ndash;")
return 0
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
out.WriteString("&mdash;")
return 2
}
if len(text) >= 2 && text[1] == '-' {
out.WriteString("&ndash;")
return 1
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
if bytes.HasPrefix(text, []byte("&quot;")) {
nextChar := byte(0)
if len(text) >= 7 {
nextChar = text[6]
}
if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
return 5
}
}
if bytes.HasPrefix(text, []byte("&#0;")) {
return 3
}
out.WriteByte('&')
return 0
}
func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
var quote byte = 'd'
if angledQuotes {
quote = 'a'
}
return func(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
}
}
func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
out.WriteString("&hellip;")
return 2
}
if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
out.WriteString("&hellip;")
return 4
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
if len(text) >= 2 && text[1] == '`' {
nextChar := byte(0)
if len(text) >= 3 {
nextChar = text[2]
}
if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
return 1
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
// and avoid changing dates like 1/23/2005 into fractions.
numEnd := 0
for len(text) > numEnd && isdigit(text[numEnd]) {
numEnd++
}
if numEnd == 0 {
out.WriteByte(text[0])
return 0
}
denStart := numEnd + 1
if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
denStart = numEnd + 3
} else if len(text) < numEnd+2 || text[numEnd] != '/' {
out.WriteByte(text[0])
return 0
}
denEnd := denStart
for len(text) > denEnd && isdigit(text[denEnd]) {
denEnd++
}
if denEnd == denStart {
out.WriteByte(text[0])
return 0
}
if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
out.WriteString("<sup>")
out.Write(text[:numEnd])
out.WriteString("</sup>&frasl;<sub>")
out.Write(text[denStart:denEnd])
out.WriteString("</sub>")
return denEnd - 1
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
if text[0] == '1' && text[1] == '/' && text[2] == '2' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
out.WriteString("&frac12;")
return 2
}
}
if text[0] == '1' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
out.WriteString("&frac14;")
return 2
}
}
if text[0] == '3' && text[1] == '/' && text[2] == '4' {
if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
out.WriteString("&frac34;")
return 2
}
}
}
out.WriteByte(text[0])
return 0
}
func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
nextChar := byte(0)
if len(text) > 1 {
nextChar = text[1]
}
if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
out.WriteString("&quot;")
}
return 0
}
func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
}
func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
}
func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
i := 0
for i < len(text) && text[i] != '>' {
i++
}
out.Write(text[:i+1])
return i
}
type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
// NewSmartypantsRenderer constructs a Smartypants renderer object.
func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
var (
r SPRenderer
smartAmpAngled = r.smartAmp(true, false)
smartAmpAngledNBSP = r.smartAmp(true, true)
smartAmpRegular = r.smartAmp(false, false)
smartAmpRegularNBSP = r.smartAmp(false, true)
addNBSP = flags&SmartypantsQuotesNBSP != 0
)
if flags&SmartypantsAngledQuotes == 0 {
r.callbacks['"'] = r.smartDoubleQuote
if !addNBSP {
r.callbacks['&'] = smartAmpRegular
} else {
r.callbacks['&'] = smartAmpRegularNBSP
}
} else {
r.callbacks['"'] = r.smartAngledDoubleQuote
if !addNBSP {
r.callbacks['&'] = smartAmpAngled
} else {
r.callbacks['&'] = smartAmpAngledNBSP
}
}
r.callbacks['\''] = r.smartSingleQuote
r.callbacks['('] = r.smartParens
if flags&SmartypantsDashes != 0 {
if flags&SmartypantsLatexDashes == 0 {
r.callbacks['-'] = r.smartDash
} else {
r.callbacks['-'] = r.smartDashLatex
}
}
r.callbacks['.'] = r.smartPeriod
if flags&SmartypantsFractions == 0 {
r.callbacks['1'] = r.smartNumber
r.callbacks['3'] = r.smartNumber
} else {
for ch := '1'; ch <= '9'; ch++ {
r.callbacks[ch] = r.smartNumberGeneric
}
}
r.callbacks['<'] = r.smartLeftAngle
r.callbacks['`'] = r.smartBacktick
return &r
}
// Process is the entry point of the Smartypants renderer.
func (r *SPRenderer) Process(w io.Writer, text []byte) {
mark := 0
for i := 0; i < len(text); i++ {
if action := r.callbacks[text[i]]; action != nil {
if i > mark {
w.Write(text[mark:i])
}
previousChar := byte(0)
if i > 0 {
previousChar = text[i-1]
}
var tmp bytes.Buffer
i += action(&tmp, previousChar, text[i:])
w.Write(tmp.Bytes())
mark = i + 1
}
}
if mark < len(text) {
w.Write(text[mark:])
}
}
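// Editor's sketch (not upstream code): a renderer is constructed once per
// document with the desired HTMLFlags (the flag names used here are defined
// elsewhere in this package) and then fed text through Process, which writes
// the substituted output to 'w'.
func exampleSmartypants(w io.Writer, input []byte) {
	sp := NewSmartypantsRenderer(SmartypantsDashes | SmartypantsFractions)
	sp.Process(w, input) // e.g. turns "--" into &mdash; and 3/4 into fraction markup
}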

vendor/modules.txt vendored Normal file

@ -0,0 +1,3 @@
# github.com/russross/blackfriday/v2 v2.1.0
## explicit
github.com/russross/blackfriday/v2