diff --git a/lib/nulib/.gitignore b/lib/nulib/.gitignore
new file mode 100644
index 0000000..e69de29
diff --git a/lib/nulib/MANIFEST.in b/lib/nulib/MANIFEST.in
new file mode 100644
index 0000000..73dd970
--- /dev/null
+++ b/lib/nulib/MANIFEST.in
@@ -0,0 +1,4 @@
+global-include *
+global-exclude *.pyc
+exclude MANIFEST
+prune dist
diff --git a/lib/nulib/TODO.md b/lib/nulib/TODO.md
new file mode 100644
index 0000000..458a07e
--- /dev/null
+++ b/lib/nulib/TODO.md
@@ -0,0 +1,180 @@
+# TODO
+
+## Améliorer rtoinst
+
+ajouter le support de pffprofile pour deploydb
+
+## Refaire/repenser les fonctions evalX
+
+dans chaque exemple, on affiche l'invocation de evalX suivi de l'équivalent en
+syntaxe standard
+
+- evala permet de traiter des tableaux
+ ~~~
+ evala with array // add a b c
+ array=("${array[@]}" a b c)
+ ~~~
+ les fonctions à utiliser pour le traitement sont configurées avec des
+ variables spéciales. par exemple, on peut indiquer que la commande add
+ ci-dessus est en réalité gérée par la fonction array_add et que c'est une
+ commande de type modify qui prend en premier argument le nom du tableau:
+ ~~~
+ __evala_add_func=array_add
+ __evala_add_type=m
+ __evala_add_arg=first
+ ~~~
+ en fonction du type de fonction, les arguments supplémentaires supportés sont
+ différents. par défaut, la fonction à utiliser est du même nom que la
+ commande, est du type scalar, et prend comme argument @
+ Ainsi les deux commandes suivantes sont équivalentes:
+ ~~~
+ evala with array // echo
+ echo "${array[@]}"
+ ~~~
+ et assument les définitions suivantes:
+ ~~~
+ __evala_echo_func=echo
+ __evala_echo_type=s
+ __evala_echo_arg=@
+ ~~~
+
+- evalx permet d'utiliser toutes ces fonctions ensemble
+ ~~~
+ evalx seq 5 //p grep -v 3 //a prepend prefix // append suffix //c echo
+ array=($(seq 5 | grep -v 3)); array=(prefix "${array[@]}"); array=("${array[@]}" suffix); echo "${array[@]}"
+
+ # à partir du mode evala, on peut exécuter directement les arguments du
+ # tableau comme une commande en terminant par //c
+ evalx -a with array // prepend echo //c
+ array=(echo "${array[@]}"); "${array[@]}"
+ ~~~
+ evalx commence par défaut en mode evalc. il est possible avec les options -i,
+ -s, -a, -c, -p, -m de forcer respectivement evali, evals, evala, evalc, evalp,
+ evalm
+
+- Il faudra réfléchir à comment sortir du mode evalm pour utilisation avec
+ evalx. ou alors on part du principe que evalm est toujours en fin de chaine.
+
+## Faire la fonction cmdx
+
+cmdx permet de lancer une commande avec les arguments qui sont générés par
+evalx. cmdx commence par défaut en mode evalm. Par exemple, les deux commandes
+suivantes sont équivalentes:
+~~~
+cmdx etitle //"Copie de " basename "$src" //" vers " ppath "$dest"
+etitle "Copie de $(basename "$src") vers $(ppath "$dest")"
+~~~
+
+Comme pour evalx, les options -i, -s, -a, -c, -p, -m permettent de forcer
+respectivement les modes evali, evals, evala, evalc, evalp, evalm. Par exemple
+les deux commandes suivantes sont équivalentes:
+~~~
+cmdx -c echo a // b
+echo "$(b "$(a)")"
+~~~
+
+## Faire la fonction checkx
+
+checkx permet de tester le résultat d'une commande evalx. elle s'utilise de
+cette manière:
+~~~
+checkx cmds... OP VALUE
+~~~
+
+Les opérateurs sont de la forme:
+~~~
+is -n|notempty
+is -z|empty
+is ok
+is ko
+== value
+!= value
+etc.
+~~~
+
+checkx remplace testx avec une syntaxe plus naturelle. si aucun script
+n'utilise les fonctions testx, peut-être peut-on simplement supprimer les
+fonctions testx et renommer checkx en testx
+
+Comme pour evalx, les options -i, -s, -a, -c, -p, -m permettent de forcer
+respectivement les modes evali, evals, evala, evalc, evalp, evalm. Par exemple
+les deux commandes suivantes sont équivalentes:
+~~~
+checkx -p a // b == c
+[ "$(evalp a // b)" == c ]
+~~~
+
+Les commandes suivantes sont équivalentes deux à deux:
+~~~
+checkx cmd is -z
+[ -z "$(evalx cmd)" ]
+
+checkx cmd is ok
+evalx cmd; [ $? -eq 0 ]
+
+checkx cmd is ko
+evalx cmd; [ $? -ne 0 ]
+
+checkx cmd == value
+[ "$(evalx cmd)" == value ]
+~~~
+
+## Faire la fonction storex
+
+storex permet de mettre le résultat d'une fonction evalx dans une variable ou de
+l'ajouter à un tableau. l'idée est d'avoir la même syntaxe que checkx. je ne
+suis pas encore tout à fait sûr que ce soit une bonne chose.
+
+Les commandes suivantes sont équivalentes deux à deux:
+~~~
+storex cmd to var
+var="$(evalx cmd)"
+
+storex cmd to var
+setx var=cmd
+
+storex -a cmd to array
+array_add array "$(evalx cmd)"
+
+storex -r cmd from array
+array_del array "$(evalx cmd)"
+~~~
+
+syntaxes alternatives
+~~~
+storex cmd to var
+addx cmd to array
+removex cmd from array
+~~~
+
+alternatives
+~~~
+setx var=cmd
+evalx cmd // array_add array
+evalx cmd // array_del array
+~~~
+
+note: il ne semble pas nécessaire au vu de l'alternative d'implémenter storex,
+addx, removex.
+
+par contre, il faut corriger un bug d'evalc: la dernière commande doit être
+exécutée telle quelle. en effet,
+~~~
+evalc a // b
+~~~
+devrait être équivalent à
+~~~
+b "$(a)"
+~~~
+mais en fait c'est plutôt
+~~~
+echo "$(b "$(a)")"
+~~~
+et ça pose problème, notamment si b initialise des variables, etc.
+
+## Fonctions diverses
+
+`retcode cmd`
+: affiche le code de retour de cmd. équivalent à `cmd; echo $?`
+
+-*- coding: utf-8 mode: markdown -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8:noeol:binary
\ No newline at end of file
diff --git a/lib/nulib/awk/base b/lib/nulib/awk/base
new file mode 100644
index 0000000..b62f5c3
--- /dev/null
+++ b/lib/nulib/awk/base
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+@include "base.core"
+@include "base.array"
+@include "base.date"
diff --git a/lib/nulib/awk/base.array b/lib/nulib/awk/base.array
new file mode 100644
index 0000000..bd5ac32
--- /dev/null
+++ b/lib/nulib/awk/base.array
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+function mkindices(values, indices, i, j) {
+ array_new(indices)
+ j = 1
+ for (i in values) {
+ indices[j++] = int(i)
+ }
+ return asort(indices)
+}
+function array_new(dest) {
+ dest[0] = 0 # forcer awk à considérer dest comme un tableau
+ delete dest
+}
+function array_newsize(dest, size, i) {
+ dest[0] = 0 # forcer awk à considérer dest comme un tableau
+ delete dest
+ size = int(size)
+ for (i = 1; i <= size; i++) {
+ dest[i] = ""
+ }
+}
+function array_len(values, count, i) {
+ # length(array) a un bug sur awk 3.1.5
+ # cette version est plus lente mais fonctionne toujours
+ count = 0
+ for (i in values) {
+ count++
+ }
+ return count
+}
+function array_copy(dest, src, count, indices, i) {
+ array_new(dest)
+ count = mkindices(src, indices)
+ for (i = 1; i <= count; i++) {
+ dest[indices[i]] = src[indices[i]]
+ }
+}
+function array_getlastindex(src, count, indices) {
+ count = mkindices(src, indices)
+ if (count == 0) return 0
+ return indices[count]
+}
+function array_add(dest, value, lastindex) {
+ lastindex = array_getlastindex(dest)
+ dest[lastindex + 1] = value
+}
+function array_deli(dest, i, l) {
+ i = int(i)
+ if (i == 0) return
+ l = array_len(dest)
+ while (i < l) {
+ dest[i] = dest[i + 1]
+ i++
+ }
+ delete dest[l]
+}
+function array_del(dest, value, ignoreCase, i) {
+ do {
+ i = key_index(value, dest, ignoreCase)
+ if (i != 0) array_deli(dest, i)
+ } while (i != 0)
+}
+function array_extend(dest, src, count, lastindex, indices, i) {
+ lastindex = array_getlastindex(dest)
+ count = mkindices(src, indices)
+ for (i = 1; i <= count; i++) {
+ dest[lastindex + i] = src[indices[i]]
+ }
+}
+function array_fill(dest, i) {
+ array_new(dest)
+ for (i = 1; i <= NF; i++) {
+ dest[i] = $i
+ }
+}
+function array_getline(src, count, indices, i, j) {
+ $0 = ""
+ count = mkindices(src, indices)
+ for (i = 1; i <= count; i++) {
+ j = indices[i]
+ $j = src[j]
+ }
+}
+function array_appendline(src, count, indices, i, nf, j) {
+ count = mkindices(src, indices)
+ nf = NF
+ for (i = 1; i <= count; i++) {
+ j = nf + indices[i]
+ $j = src[indices[i]]
+ }
+}
+function in_array(value, values, ignoreCase, i) {
+ if (ignoreCase) {
+ value = tolower(value)
+ for (i in values) {
+ if (tolower(values[i]) == value) return 1
+ }
+ } else {
+ for (i in values) {
+ if (values[i] == value) return 1
+ }
+ }
+ return 0
+}
+function key_index(value, values, ignoreCase, i) {
+ if (ignoreCase) {
+ value = tolower(value)
+ for (i in values) {
+ if (tolower(values[i]) == value) return int(i)
+ }
+ } else {
+ for (i in values) {
+ if (values[i] == value) return int(i)
+ }
+ }
+ return 0
+}
+function array2s(values, prefix, sep, suffix, noindices, first, i, s) {
+ if (!prefix) prefix = "["
+ if (!sep) sep = ", "
+ if (!suffix) suffix = "]"
+ s = prefix
+ first = 1
+ for (i in values) {
+ if (first) first = 0
+ else s = s sep
+ if (!noindices) s = s "[" i "]="
+ s = s values[i]
+ }
+ s = s suffix
+ return s
+}
+function array2so(values, prefix, sep, suffix, noindices, count, indices, i, s) {
+ if (!prefix) prefix = "["
+ if (!sep) sep = ", "
+ if (!suffix) suffix = "]"
+ s = prefix
+ count = mkindices(values, indices)
+ for (i = 1; i <= count; i++) {
+ if (i > 1) s = s sep
+ if (!noindices) s = s "[" indices[i] "]="
+ s = s values[indices[i]]
+ }
+ s = s suffix
+ return s
+}
+function array_join(values, sep, prefix, suffix, count, indices, i, s) {
+ s = prefix
+ count = mkindices(values, indices)
+ for (i = 1; i <= count; i++) {
+ if (i > 1) s = s sep
+ s = s values[indices[i]]
+ }
+ s = s suffix
+ return s
+}
diff --git a/lib/nulib/awk/base.core b/lib/nulib/awk/base.core
new file mode 100644
index 0000000..49a4b58
--- /dev/null
+++ b/lib/nulib/awk/base.core
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+function num(s) {
+ if (s ~ /^[0-9]+$/) return int(s)
+ else return s
+}
+function ord(s, i) {
+ s = substr(s, 1, 1)
+ i = index(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~", s)
+ if (i != 0) i += 32 - 1
+ return i
+}
+function hex(i, s) {
+ s = sprintf("%x", i)
+ if (length(s) < 2) s = "0" s
+ return s
+}
+function qhtml(s) {
+    gsub(/&/, "\\&amp;", s)
+    gsub(/"/, "\\&quot;", s)
+    gsub(/>/, "\\&gt;", s)
+    gsub(/</, "\\&lt;", s)
+    return s
+}
+function unquote_html(s) {
+    gsub(/&lt;/, "<", s)
+    gsub(/&gt;/, ">", s)
+    gsub(/&quot;/, "\"", s)
+    gsub(/&amp;/, "\\&", s)
+    return s
+}
+function qawk(s) {
+ gsub(/\\/, "\\\\", s)
+ gsub(/"/, "\\\"", s)
+ gsub(/\n/, "\\n", s)
+ return "\"" s "\""
+}
+function qval(s) {
+ gsub(/'/, "'\\''", s)
+ return "'" s "'"
+}
+function sqval(s) {
+ return " " qval(s)
+}
+function qvals( i, line) {
+ line = ""
+ for (i = 1; i <= NF; i++) {
+ if (i > 1) line = line " "
+ line = line qval($i)
+ }
+ return line
+}
+function sqvals() {
+ return " " qvals()
+}
+function qarr(values, prefix, i, count, line) {
+ line = prefix
+ count = array_len(values)
+ for (i = 1; i <= count; i++) {
+ if (i > 1 || line != "") line = line " "
+ line = line qval(values[i])
+ }
+ return line
+}
+function qregexp(s) {
+ gsub(/[[\\.^$*+?()|{]/, "\\\\&", s)
+ return s
+}
+function qsubrepl(s) {
+ gsub(/\\/, "\\\\", s)
+ gsub(/&/, "\\\\&", s)
+ return s
+}
+function qgrep(s) {
+ gsub(/[[\\.^$*]/, "\\\\&", s)
+ return s
+}
+function qegrep(s) {
+ gsub(/[[\\.^$*+?()|{]/, "\\\\&", s)
+ return s
+}
+function qsql(s, suffix) {
+ gsub(/'/, "''", s)
+ return "'" s "'" (suffix != ""? " " suffix: "")
+}
+function cqsql(s, suffix) {
+ return "," qsql(s, suffix)
+}
+function unquote_mysqlcsv(s) {
+ gsub(/\\n/, "\n", s)
+ gsub(/\\t/, "\t", s)
+ gsub(/\\0/, "\0", s)
+ gsub(/\\\\/, "\\", s)
+ return s
+}
+function sval(s) {
+ if (s == "") return s
+ else return " " s
+}
+function cval(s, suffix) {
+ suffix = suffix != ""? " " suffix: ""
+ if (s == "") return s
+ else return "," s suffix
+}
+
+function printto(s, output) {
+ if (output == "") {
+ print s
+ } else if (output ~ /^>>/) {
+ sub(/^>>/, "", output)
+ print s >>output
+ } else if (output ~ /^>/) {
+ sub(/^>/, "", output)
+ print s >output
+ } else if (output ~ /^\|&/) {
+ sub(/^\|&/, "", output)
+ print s |&output
+ } else if (output ~ /^\|/) {
+ sub(/^\|/, "", output)
+ print s |output
+ } else {
+ print s >output
+ }
+}
+function find_line(input, field, value, orig, line) {
+ orig = $0
+ line = ""
+    while ((getline <input) > 0) {
+ if ($field == value) {
+ line = $0
+ break
+ }
+ }
+ close(input)
+ $0 = orig
+ return line
+}
+function merge_line(input, field, key, line) {
+ line = find_line(input, field, $key)
+ if (line != "") $0 = $0 FS line
+}
diff --git a/lib/nulib/awk/base.date b/lib/nulib/awk/base.date
new file mode 100644
index 0000000..48e3eff
--- /dev/null
+++ b/lib/nulib/awk/base.date
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+function date__parse_fr(date, parts, y, m, d) {
+ if (match(date, /([0-9][0-9]?)\/([0-9][0-9]?)\/([0-9][0-9][0-9][0-9])/, parts)) {
+ y = int(parts[3])
+ m = int(parts[2])
+ d = int(parts[1])
+ return mktime(sprintf("%04i %02i %02i 00 00 00 +0400", y, m, d))
+ } else if (match(date, /([0-9][0-9]?)\/([0-9][0-9]?)\/([0-9][0-9])/, parts)) {
+ basey = int(strftime("%Y")); basey = basey - basey % 100
+ y = basey + int(parts[3])
+ m = int(parts[2])
+ d = int(parts[1])
+ return mktime(sprintf("%04i %02i %02i 00 00 00 +0400", y, m, d))
+ }
+ return -1
+}
+function date__parse_mysql(date, parts, y, m, d) {
+ if (match(date, /([0-9][0-9][0-9][0-9])-([0-9][0-9])-([0-9][0-9])/, parts)) {
+ y = int(parts[1])
+ m = int(parts[2])
+ d = int(parts[3])
+ return mktime(sprintf("%04i %02i %02i 00 00 00 +0400", y, m, d))
+ }
+ return -1
+}
+function date__parse_any(date, serial) {
+ serial = date__parse_fr(date)
+ if (serial == -1) serial = date__parse_mysql(date)
+ return serial
+}
+function date_serial(date) {
+ return date__parse_any(date)
+}
+function date_parse(date, serial) {
+ serial = date__parse_any(date)
+ if (serial == -1) return date
+ return strftime("%d/%m/%Y", serial)
+}
+function date_monday(date, serial, dow) {
+ serial = date__parse_any(date)
+ if (serial == -1) return date
+ dow = strftime("%u", serial)
+ serial -= (dow - 1) * 86400
+ return strftime("%d/%m/%Y", serial)
+}
+function date_add(date, nbdays, serial) {
+ serial = date__parse_any(date)
+ if (serial == -1) return date
+ serial += nbdays * 86400
+ return strftime("%d/%m/%Y", serial)
+}
diff --git a/lib/nulib/awk/csv b/lib/nulib/awk/csv
new file mode 100644
index 0000000..7f16be4
--- /dev/null
+++ b/lib/nulib/awk/csv
@@ -0,0 +1,201 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+@include "base.core"
+@include "base.array"
+
+function csv__parse_quoted(line, destl, colsep, qchar, echar, pos, tmpl, nextc, resl) {
+ line = substr(line, 2)
+ resl = ""
+ while (1) {
+ pos = index(line, qchar)
+ if (pos == 0) {
+ # chaine mal terminee
+ resl = resl line
+ destl[0] = ""
+ destl[1] = 0
+ return resl
+ }
+ if (echar != "" && pos > 1) {
+ # tenir compte du fait qu"un caratère peut être mis en échappement
+ prevc = substr(line, pos - 1, 1)
+ quotec = substr(line, pos, 1)
+ nextc = substr(line, pos + 1, 1)
+ if (prevc == echar) {
+ # qchar en échappement
+ tmpl = substr(line, 1, pos - 2)
+ resl = resl tmpl quotec
+ line = substr(line, pos + 1)
+ continue
+ }
+ tmpl = substr(line, 1, pos - 1)
+ if (nextc == colsep || nextc == "") {
+ # fin de champ ou fin de ligne
+ resl = resl tmpl
+ destl[0] = substr(line, pos + 2)
+ destl[1] = nextc == colsep
+ return resl
+ } else {
+ # erreur de syntaxe: guillemet non mis en échappement
+ # ignorer cette erreur et prendre le guillemet quand meme
+ resl = resl tmpl quotec
+ line = substr(line, pos + 1)
+ }
+ } else {
+ # pas d"échappement pour qchar. il est éventuellement doublé
+ tmpl = substr(line, 1, pos - 1)
+ quotec = substr(line, pos, 1)
+ nextc = substr(line, pos + 1, 1)
+ if (nextc == colsep || nextc == "") {
+ # fin de champ ou fin de ligne
+ resl = resl tmpl
+ destl[0] = substr(line, pos + 2)
+ destl[1] = nextc == colsep
+ return resl
+ } else if (nextc == qchar) {
+ # qchar en echappement
+ resl = resl tmpl quotec
+ line = substr(line, pos + 2)
+ } else {
+ # erreur de syntaxe: guillemet non mis en échappement
+ # ignorer cette erreur et prendre le guillemet quand meme
+ resl = resl tmpl quotec
+ line = substr(line, pos + 1)
+ }
+ }
+ }
+}
+function csv__parse_unquoted(line, destl, colsep, qchar, echar, pos) {
+ pos = index(line, colsep)
+ if (pos == 0) {
+ destl[0] = ""
+ destl[1] = 0
+ return line
+ } else {
+ destl[0] = substr(line, pos + 1)
+ destl[1] = 1
+ return substr(line, 1, pos - 1)
+ }
+}
+function csv__array_parse(fields, line, nbfields, colsep, qchar, echar, shouldparse, destl, i) {
+ array_new(fields)
+ array_new(destl)
+ i = 1
+ shouldparse = 0
+ # shouldparse permet de gérer le cas où un champ vide est en fin de ligne.
+ # en effet, après "," il faut toujours parser, même si line==""
+ while (shouldparse || line != "") {
+ if (index(line, qchar) == 1) {
+ value = csv__parse_quoted(line, destl, colsep, qchar, echar)
+ line = destl[0]
+ shouldparse = destl[1]
+ } else {
+ value = csv__parse_unquoted(line, destl, colsep, qchar, echar)
+ line = destl[0]
+ shouldparse = destl[1]
+ }
+ fields[i] = value
+ i = i + 1
+ }
+ if (nbfields) {
+ nbfields = int(nbfields)
+ i = array_len(fields)
+ while (i < nbfields) {
+ i++
+ fields[i] = ""
+ }
+ }
+ return array_len(fields)
+}
+BEGIN {
+ DEFAULT_COLSEP = ","
+ DEFAULT_QCHAR = "\""
+ DEFAULT_ECHAR = ""
+}
+function array_parsecsv2(fields, line, nbfields, colsep, qchar, echar) {
+ return csv__array_parse(fields, line, nbfields, colsep, qchar, echar)
+}
+function array_parsecsv(fields, line, nbfields, colsep, qchar, echar) {
+ if (colsep == "") colsep = DEFAULT_COLSEP
+ if (qchar == "") qchar = DEFAULT_QCHAR
+ if (echar == "") echar = DEFAULT_ECHAR
+ return csv__array_parse(fields, line, nbfields, colsep, qchar, echar)
+}
+function parsecsv(line, fields) {
+ array_parsecsv(fields, line)
+ array_getline(fields)
+ return NF
+}
+function getlinecsv(file, fields) {
+ if (file) {
+        getline <file
+    } else {
+        getline
+    }
+    return parsecsv($0, fields)
+}
+function csv__should_quote(s) {
+    if (s ~ /^[ \t]/) return 1
+    if (s ~ /[ \t]$/) return 1
+    return 0
+}
+function array_formatcsv2(fields, colsep, mvsep, qchar, echar,    i, count, indices, line, value) {
+    line = ""
+    count = mkindices(fields, indices)
+    for (i = 1; i <= count; i++) {
+        value = fields[indices[i]]
+        if (i > 1) line = line colsep
+ if (qchar != "" && index(value, qchar) != 0) {
+ if (echar != "") gsub(qchar, quote_subrepl(echar) "&", value);
+ else gsub(qchar, "&&", value);
+ }
+ if (qchar != "" && (index(value, mvsep) != 0 || index(value, colsep) != 0 || index(value, qchar) != 0 || csv__should_quote(value))) {
+ line = line qchar value qchar
+ } else {
+ line = line value
+ }
+ }
+ return line
+}
+function array_formatcsv(fields) {
+ return array_formatcsv2(fields, ",", ";", "\"", "")
+}
+function array_printcsv(fields, output) {
+ printto(array_formatcsv(fields), output)
+}
+function get_formatcsv( fields) {
+ array_fill(fields)
+ return array_formatcsv(fields)
+}
+function formatcsv() {
+ $0 = get_formatcsv()
+}
+function printcsv(output, fields) {
+ array_fill(fields)
+ array_printcsv(fields, output)
+}
+function array_findcsv(fields, input, field, value, nbfields, orig, found, i) {
+ array_new(orig)
+ array_fill(orig)
+ array_new(fields)
+ found = 0
+    while ((getline <input) > 0) {
+ array_parsecsv(fields, $0, nbfields)
+ if (fields[field] == value) {
+ found = 1
+ break
+ }
+ }
+ close(input)
+ array_getline(orig)
+ if (!found) {
+ delete fields
+ if (nbfields) {
+ nbfields = int(nbfields)
+ i = array_len(fields)
+ while (i < nbfields) {
+ i++
+ fields[i] = ""
+ }
+ }
+ }
+ return found
+}
diff --git a/lib/nulib/awk/enc.base64 b/lib/nulib/awk/enc.base64
new file mode 100644
index 0000000..b782fcf
--- /dev/null
+++ b/lib/nulib/awk/enc.base64
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 mode: awk -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+function base64__and(var, x, l_res, l_i) {
+ l_res = 0;
+ for (l_i = 0; l_i < 8; l_i++){
+ if (var%2 == 1 && x%2 == 1) l_res = l_res/2 + 128;
+ else l_res /= 2;
+ var = int(var/2);
+ x = int(x/2);
+ }
+ return l_res;
+}
+# Rotate bytevalue left x times
+function base64__lshift(var, x) {
+ while(x > 0){
+ var *= 2;
+ x--;
+ }
+ return var;
+}
+# Rotate bytevalue right x times
+function base64__rshift(var, x) {
+ while(x > 0){
+ var = int(var/2);
+ x--;
+ }
+ return var;
+}
+BEGIN {
+ BASE64__BYTES = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
+}
+function b64decode(src, result, base1, base2, base3, base4) {
+ result = ""
+ while (length(src) > 0) {
+ # Specify byte values
+ base1 = substr(src, 1, 1)
+ base2 = substr(src, 2, 1)
+ base3 = substr(src, 3, 1); if (base3 == "") base3 = "="
+ base4 = substr(src, 4, 1); if (base4 == "") base4 = "="
+ # Now find numerical position in BASE64 string
+ byte1 = index(BASE64__BYTES, base1) - 1
+ if (byte1 < 0) byte1 = 0
+ byte2 = index(BASE64__BYTES, base2) - 1
+ if (byte2 < 0) byte2 = 0
+ byte3 = index(BASE64__BYTES, base3) - 1
+ if (byte3 < 0) byte3 = 0
+ byte4 = index(BASE64__BYTES, base4) - 1
+ if (byte4 < 0) byte4 = 0
+ # Reconstruct ASCII string
+ result = result sprintf( "%c", base64__lshift(base64__and(byte1, 63), 2) + base64__rshift(base64__and(byte2, 48), 4) )
+ if (base3 != "=") result = result sprintf( "%c", base64__lshift(base64__and(byte2, 15), 4) + base64__rshift(base64__and(byte3, 60), 2) )
+ if (base4 != "=") result = result sprintf( "%c", base64__lshift(base64__and(byte3, 3), 6) + byte4 )
+ # Decrease incoming string with 4
+ src = substr(src, 5)
+ }
+ return result
+}
diff --git a/lib/nulib/bash/base b/lib/nulib/bash/base
new file mode 100644
index 0000000..cc3d388
--- /dev/null
+++ b/lib/nulib/bash/base
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+# shim pour les fonctions de nulib.sh au cas où ce module n'est pas chargée
+if [ -z "$NULIBDIR" -o "$NULIBDIR" != "$NULIBINIT" ]; then
+ function module:() { :; }
+ function function:() { :; }
+ function require:() { :; }
+ function import:() { :; }
+fi
+##@include base.init
+##@include base.core
+##@include base.str
+##@include base.arr
+##@include base.io
+##@include base.eval
+##@include base.split
+##@include base.path
+##@include base.args
+module: base base_ "Chargement de tous les modules base.*"
+NULIB_RECURSIVE_IMPORT=1
+require: base.init base.core base.str base.arr base.io base.eval base.split base.path base.args
diff --git a/lib/nulib/bash/base.args b/lib/nulib/bash/base.args
new file mode 100644
index 0000000..816100a
--- /dev/null
+++ b/lib/nulib/bash/base.args
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.args base_ "Fonctions de base: analyse d'arguments"
+require: base.arr
+
+function: base_myargs_local "Afficher des commandes pour rendre locales des variables utilisées par base_myargs()
+
+Cela permet d'utiliser base_myargs() à l'intérieur d'une fonction. Par défaut, la génération automatique de l'autocomplete est désactivée."
+function base_myargs_local() {
+ # par défaut, désactiver génération de autocomplete
+ echo "local NULIB_ARGS_HELP_DESC NULIB_ARGS_HELP_USAGE NULIB_ARGS_HELP_OPTIONS args"
+ echo "local NULIB_ARGS_DISABLE_AC=1"
+ echo "local NULIB_ARGS_ONERROR_RETURN=1"
+}
+
+function: base_myargs: "Débuter la description des arguments reconnus par ce script.
+
+Arguments
+: \$1 est un résumé de l'objet de ce script
+: \$2 est le nom du script s'il est différent de \$MYNAME
+
+Le mode opératoire est généralement le suivant:
+~~~
+myargs:
+desc \"faire un traitement\"
+usage \"MYNAME [options] \"
+arg -o:,--output:file output= \"spécifier le fichier destination\"
+arg -h:,--host:host hosts+ \"spécifier les hôtes concernés\"
+arg -c,--count count=1
+parse \"\$@\"; set -- \"\${args[@]}\"
+~~~"
+function base_myargs:() {
+ NULIB_ARGS_HELP_DESC=
+ NULIB_ARGS_HELP_USAGE=
+ NULIB_ARGS_HELP_OPTIONS=()
+ args=()
+ function desc() { base_myargs_desc "$@"; }
+ function usage() { base_myargs_usage "$@"; }
+ function arg() { base_myargs_add "$@"; }
+ function parse() { base_myargs_parse "$@"; }
+}
+
+function: base_myargs_desc ""
+function base_myargs_desc() {
+ NULIB_ARGS_HELP_DESC="$*"
+}
+
+function: base_myargs_usage ""
+function base_myargs_usage() {
+ NULIB_ARGS_HELP_USAGE="$*"
+}
+
+function: base_myargs_add "Ajouter une définition d'option
+
+Syntaxes
+: base_arg MODE
+: base_arg [MODE] -OPTIONS ACTION DESC
+: base_arg [MODE] VARIABLE DESC
+
+MODE peut être l'un des caractères '+', '-', '%' et a un effet sur l'analyse
+entière de la ligne de commande
+* Les caractères '+' et '-' influent sur la méthode d'analyse. Par défaut, les
+ options sont valides n'importe où sur la ligne de commande. Avec '+',
+ l'analyse s'arrête au premier argument qui n'est pas une option. Avec '-', les
+ options sont valides n'importe ou sur la ligne de commande, mais les arguments
+ ne sont pas réordonnés, et apparaissent dans l'ordre de leur mention.
+* Le caractère '%' demande que toutes les variables mentionnées à partir de ce
+ moment soient initialisées. Elle sont garanties d'être vides.
+
+Avec la première syntaxe, on définit précisément l'option. Deux formes sont
+supportées. La forme détermine le type d'action
+* Avec la forme '-OPT VAR[=VALUE]', OPT est une description d'option, VAR un nom
+ de variable à mettre à jour, et VALUE une valeur éventuelle pour les options
+ sans argument. Si plusieurs options sont mentionnées, séparées par des
+ virgules, alors tous les options partagent les mêmes paramètres.
+
+ OPT peut être de la forme '-o' ou '--longopt' pour des options sans arguments.
+ Dans ce cas, VAR obtient le nombre de fois que l'option est mentionnée (vide
+ pour aucune mention, '1' pour une seule mention, etc.), sauf si on utilise la
+ forme VAR=VALUE, auquel cas la variable obtient la valeur VALUE, et le nombre
+ d'occurences de l'option n'est pas compté.
+
+ Pour faciliter la lecture:
+ * '--longopt .' est équivalent à '--longopt longopt'
+ * '--longopt: .' est équivalent à '--longopt: longopt='
+
+ Avec les formes '-o:' et '--longopt:', l'option prend un argument obligatoire.
+ Avec les formes '-o::' et '--longopt::', l'option prend un argument facultatif
+ (dans ce cas, la valeur de l'option sur la ligne de commande doit
+ obligatoirement être collée à l'option.)
+
+ Si ces options sont mentionnées plusieurs fois sur la ligne de commande, alors
+ la variable de destination est un tableau qui contient toutes les valeurs. Le
+ traitement de la valeur d'une variable dépend de la forme utilisée.
+ * Avec une option sans argument, le comportement est celui décrit ci-dessus.
+ * Avec une option qui prend des arguments, la forme '-o: VAR' considère que
+ VAR est un tableau qui contiendra toutes les valeurs mentionnées dans les
+ options. Avec la forme '-o: VAR=', la variable n'est pas un tableau et
+ contient toujours la dernière valeur spécifiée.
+* Dans la forme 'opt \$cmd', la commande cmd est executée avec eval *dès* que
+ l'option est rencontrée. La variable option_ contient l'option, e.g. '-o' ou
+ '--longopt'. Le cas échéant, la variable value_ contient la valeur de
+ l'option. La fonction 'set@ NAME' met à jour la variable NAME, soit en lui
+ donnant la valeur \$value_, soit en l'incrémentant, suivant le type d'option.
+ La fonction 'inc@ NAME' incrémente la variable NAME, 'res@ NAME [VALUE]'
+ initialise la variable à la valeur VALUE, 'add@ NAME [VALUE]' ajoute VALUE à
+ la fin du tableau NAME. Par défaut, VALUE vaut \$value_
+
+Avec la deuxième syntaxe, l'option est déterminée sur la base du nom de la
+variable.
+* Une variable de la forme 'sansarg' est pour une option simple qui ne prend pas
+ d'argument
+* Une variable de la forme 'avecarg=[default-value]' est pour une option qui
+ prend un argument.
+L'option générée est une option longue. En l'occurence, les options générées
+sont respectivement '--sansarg' et '--avecarg:'
+Les variables et les options sont toujours en minuscule. Pour les variables, le
+caractère '-' est remplacé par '_'. Si une option contient une lettre en
+majuscule, l'option courte correspondante à cette lettre sera aussi reconnue.
+
+"
+function base_myargs_add() {
+ # description des options
+ base_array_add args "${@:1:2}"
+ # puis construire la description de l'option pour l'aide
+ local -a os; local o odesc
+ base_array_split os "$1" ,
+ for o in "${os[@]}"; do
+ o="${o%%:*}"
+ [ -n "$odesc" ] && odesc="$odesc, "
+ odesc="$odesc$o"
+ done
+ for o in "${os[@]}"; do
+ if [[ "$o" == *:* ]]; then
+ if [ "${2#\$}" != "$2" ]; then
+ o=ARG
+ else
+ o="${2%%=*}"
+ o="${o^^}"
+ fi
+ [ -n "$odesc" ] && odesc="$odesc "
+ odesc="$odesc$o"
+ fi
+ break
+ done
+ base_array_add NULIB_ARGS_HELP_OPTIONS "$odesc"
+ [ -n "$3" ] && base_array_add NULIB_ARGS_HELP_OPTIONS "$3"
+}
+
+function: base_myargs_show_help ""
+function base_myargs_show_help() {
+ local help="$MYNAME"
+ [ -n "$NULIB_ARGS_HELP_DESC" ] && help="$help: $NULIB_ARGS_HELP_DESC"
+ [ -n "$NULIB_ARGS_HELP_USAGE" ] && help="$help
+
+USAGE
+ $NULIB_ARGS_HELP_USAGE"
+ [ ${#NULIB_ARGS_HELP_OPTIONS[*]} -gt 0 ] && help="$help
+
+OPTIONS"
+ echo "$help"
+ for help in "${NULIB_ARGS_HELP_OPTIONS[@]}"; do
+ echo "$help"
+ done
+}
+
+function: base_myargs_parse ""
+function base_myargs_parse() {
+ [ -z "$NULIB_NO_DISABLE_SET_X" ] && [[ $- == *x* ]] && { set +x; local NULIB_ARGS_SET_X=1; }
+ local r=0
+ if ! parse_opts "${PRETTYOPTS[@]}" "${args[@]}" @ args -- "$@"; then
+ edie "$args"
+ r=$?
+ fi
+ [ -n "$NULIB_ARGS_SET_X" ] && set -x; return $r
+}
diff --git a/lib/nulib/bash/base.arr b/lib/nulib/bash/base.arr
new file mode 100644
index 0000000..0524375
--- /dev/null
+++ b/lib/nulib/bash/base.arr
@@ -0,0 +1,361 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.arr base_ "Fonctions de base: gestion des variables tableaux"
+require: base.core base.str
+
function: base_array_count "retourner le nombre d'éléments du tableau \$1"
# Print the number of elements in the array named $1.
function base_array_count() {
    eval "echo \"\${#$1[*]}\""
}

function: base_array_isempty "tester si le tableau \$1 est vide"
# Succeed when the array named $1 has no element.
function base_array_isempty() {
    eval "test \${#$1[*]} -eq 0"
}

function: base_array_new "créer un tableau vide dans la variable \$1"
# Reset the variable named $1 to an empty array.
function base_array_new() {
    eval "$1=()"
}

function: base_array_copy "copier le contenu du tableau \$2 dans le tableau \$1"
# Overwrite array $1 with the elements of array $2.
function base_array_copy() {
    eval "$1=(\"\${$2[@]}\")"
}

function: base_array_add "ajouter les valeurs \$2..@ à la fin du tableau \$1"
# Append the values $2..@ at the end of array $1.
function base_array_add() {
    local _aa_name="$1"; shift
    eval "$_aa_name=(\"\${$_aa_name[@]}\" \"\$@\")"
}

function: base_array_ins "insérer les valeurs \$2..@ au début du tableau \$1"
# Prepend the values $2..@ at the beginning of array $1.
function base_array_ins() {
    local _ai_name="$1"; shift
    eval "$_ai_name=(\"\$@\" \"\${$_ai_name[@]}\")"
}

function: base_array_del "supprimer *les* valeurs \$2 du tableau \$1"
# Remove every occurrence of value $2 from array $1.
function base_array_del() {
    local _ad_item
    local -a _ad_keep
    eval '
for _ad_item in "${'"$1"'[@]}"; do
    if [ "$_ad_item" != "$2" ]; then _ad_keep=("${_ad_keep[@]}" "$_ad_item"); fi
done'
    base_array_copy "$1" _ad_keep
}
+
function: base_array_addu "ajouter la valeur \$2 au tableau \$1, si la valeur n'y est pas déjà

Retourner vrai si la valeur a été ajoutée"
# Append $2 to array $1 unless already present; succeed only when added.
function base_array_addu() {
    local _au_item
    eval '
for _au_item in "${'"$1"'[@]}"; do
    if [ "$_au_item" == "$2" ]; then return 1; fi
done'
    eval "$1=(\"\${$1[@]}\" \"\$2\")"
    return 0
}

function: base_array_insu "insérer la valeur \$2 au début du tableau tableau \$1, si la valeur n'y est pas déjà

Retourner vrai si la valeur a été ajoutée."
# Prepend $2 to array $1 unless already present; succeed only when added.
function base_array_insu() {
    local _iu_item
    eval '
for _iu_item in "${'"$1"'[@]}"; do
    if [ "$_iu_item" == "$2" ]; then return 1; fi
done'
    eval "$1=(\"\$2\" \"\${$1[@]}\")"
    return 0
}

function: base_array_fillrange "Initialiser le tableau \$1 avec les nombres de \$2(=1) à \$3(=10) avec un step de \$4(=1)"
# Fill array $1 with the numbers $2(=1)..$3(=10), stepping by $4(=1).
function base_array_fillrange() {
    local _fr_cur="${2:-1}" _fr_stop="${3:-10}" _fr_inc="${4:-1}"
    local -a _fr_out
    while [ "$_fr_cur" -le "$_fr_stop" ]; do
        _fr_out=("${_fr_out[@]}" "$_fr_cur")
        _fr_cur=$(($_fr_cur + $_fr_inc))
    done
    eval "$1=(\"\${_fr_out[@]}\")"
}

function: base_array_eq "tester l'égalité des tableaux \$1 et \$2"
# Succeed when arrays $1 and $2 hold the same elements in the same order.
function base_array_eq() {
    local -a _eq_x _eq_y
    eval "_eq_x=(\"\${$1[@]}\")"
    eval "_eq_y=(\"\${$2[@]}\")"
    [ ${#_eq_x[*]} -eq ${#_eq_y[*]} ] || return 1
    local _eq_i=0
    while [ $_eq_i -lt ${#_eq_x[*]} ]; do
        [ "${_eq_x[$_eq_i]}" == "${_eq_y[$_eq_i]}" ] || return 1
        _eq_i=$(($_eq_i + 1))
    done
    return 0
}

function: base_array_contains "tester si le tableau \$1 contient la valeur \$2"
# Succeed when array $1 contains the value $2.
function base_array_contains() {
    local _ct_item
    eval '
for _ct_item in "${'"$1"'[@]}"; do
    if [ "$_ct_item" == "$2" ]; then return 0; fi
done'
    return 1
}
+
function: base_array_icontains "tester si le tableau \$1 contient la valeur \$2, sans tenir compte de la casse"
# Case-insensitive membership test: succeed when array $1 contains $2.
function base_array_icontains() {
    local __ac_v
    # BUGFIX: the original wrote [ "${__ac_v,,} == "${2,,}" ] — the closing
    # quote of the first operand was missing, so [ always failed with a
    # syntax error and the function never matched anything.
    eval '
for __ac_v in "${'"$1"'[@]}"; do
    [ "${__ac_v,,}" == "${2,,}" ] && return 0
done'
    return 1
}
+
function: base_array_find "si le tableau \$1 contient la valeur \$2, afficher l'index de la valeur. Si le tableau \$3 est spécifié, afficher la valeur à l'index dans ce tableau"
# Look up $2 in array $1: print its index (or, when $3 names another array,
# the value of $3 at that index, raw via recho) and succeed; else fail.
function base_array_find() {
    local _fd_idx=0 _fd_item
    eval '
for _fd_item in "${'"$1"'[@]}"; do
    if [ "$_fd_item" == "$2" ]; then
        if [ -n "$3" ]; then
            recho "${'"$3"'[$_fd_idx]}"
        else
            echo "$_fd_idx"
        fi
        return 0
    fi
    _fd_idx=$(($_fd_idx + 1))
done'
    return 1
}

function: base_array_reverse "Inverser l'ordre des élément du tableau \$1"
# Reverse the element order of array $1 in place.
function base_array_reverse() {
    local -a _rv_src
    local _rv_item
    eval "_rv_src=(\"\${$1[@]}\")"
    eval "$1=()"
    # prepending each element in turn reverses the order
    for _rv_item in "${_rv_src[@]}"; do
        eval "$1=(\"\$_rv_item\" \"\${$1[@]}\")"
    done
}
+
function: base_array_replace "dans le tableau \$1, remplacer toutes les occurences de \$2 par \$3..*"
# In array $1, replace every occurrence of $2 with the values $3..@.
function base_array_replace() {
    local _rp_name="$1"; shift
    local _rp_from="$1"; shift
    local -a _rp_in _rp_out
    local _rp_item
    eval "_rp_in=(\"\${$_rp_name[@]}\")"
    for _rp_item in "${_rp_in[@]}"; do
        if [ "$_rp_item" == "$_rp_from" ]; then
            _rp_out=("${_rp_out[@]}" "$@")
        else
            _rp_out=("${_rp_out[@]}" "$_rp_item")
        fi
    done
    eval "$_rp_name=(\"\${_rp_out[@]}\")"
}

function: base_array_each "Pour chacune des valeurs ITEM du tableau \$1, appeler la fonction \$2 avec les arguments (\$3..@ ITEM)"
# Invoke "$2 $3..@ ITEM" once per ITEM of array $1.
function base_array_each() {
    local _ea_item
    local -a _ea_items
    eval "_ea_items=(\"\${$1[@]}\")"
    shift
    for _ea_item in "${_ea_items[@]}"; do
        "$@" "$_ea_item"
    done
}

function: base_array_map "Pour chacune des valeurs ITEM du tableau \$1, appeler la fonction \$2 avec les arguments (\$3..@ ITEM), et remplacer la valeur par le résultat de la fonction"
# Replace every ITEM of array $1 with the output of "$2 $3..@ ITEM".
function base_array_map() {
    local _mp_name="$1"; shift
    local _mp_func="$1"; shift
    local _mp_item
    local -a _mp_in _mp_out
    eval "_mp_in=(\"\${$_mp_name[@]}\")"
    for _mp_item in "${_mp_in[@]}"; do
        _mp_out=("${_mp_out[@]}" "$("$_mp_func" "$@" "$_mp_item")")
    done
    eval "$_mp_name=(\"\${_mp_out[@]}\")"
}
+
function: base_array_first "afficher la première valeur du tableau \$1"
# Print the first element of array $1 (raw output via recho, from base.core).
function base_array_first() {
    eval "recho \"\${$1[@]:0:1}\""
}

function: base_array_last "afficher la dernière valeur du tableau \$1"
# Print the last element of array $1.
function base_array_last() {
    eval "recho \"\${$1[@]: -1:1}\""
}

function: base_array_copy_firsts "copier toutes les valeurs du tableau \$2(=\$1) dans le tableau \$1, excepté la dernière"
# Copy array $2 (default: $1 itself) into $1, dropping the last element.
function base_array_copy_firsts() {
    local _cf_src="${2:-$1}"
    eval "$1=(\"\${$_cf_src[@]:0:\$((\${#$_cf_src[@]}-1))}\")"
}

function: base_array_copy_lasts "copier toutes les valeurs du tableau \$2(=\$1) dans le tableau \$1, excepté la première"
# Copy array $2 (default: $1 itself) into $1, dropping the first element.
function base_array_copy_lasts() {
    local _cl_src="${2:-$1}"
    eval "$1=(\"\${$_cl_src[@]:1}\")"
}

function: base_array_extend "ajouter le contenu du tableau \$2 au tableau \$1"
# Append the whole content of array $2 to array $1.
function base_array_extend() {
    eval "$1=(\"\${$1[@]}\" \"\${$2[@]}\")"
}

function: base_array_extendu "ajouter chacune des valeurs du tableau \$2 au tableau \$1, si ces valeurs n'y sont pas déjà

Retourner vrai si au moins une valeur a été ajoutée"
# Append each value of array $2 that is missing from $1; succeed when at
# least one value was actually added.
function base_array_extendu() {
    local _eu_item _eu_cur _eu_dup _eu_added=1
    local -a _eu_src
    eval "_eu_src=(\"\${$2[@]}\")"
    for _eu_item in "${_eu_src[@]}"; do
        _eu_dup=
        eval '
for _eu_cur in "${'"$1"'[@]}"; do
    if [ "$_eu_cur" == "$_eu_item" ]; then _eu_dup=1; break; fi
done'
        if [ -z "$_eu_dup" ]; then
            eval "$1=(\"\${$1[@]}\" \"\$_eu_item\")"
            _eu_added=0
        fi
    done
    return "$_eu_added"
}

function: base_array_extend_firsts "ajouter toutes les valeurs du tableau \$2 dans le tableau \$1, excepté la dernière"
# Append all values of array $2 to $1, except the last one.
function base_array_extend_firsts() {
    eval "$1=(\"\${$1[@]}\" \"\${$2[@]:0:\$((\${#$2[@]}-1))}\")"
}

function: base_array_extend_lasts "ajouter toutes les valeurs du tableau \$2 dans le tableau \$1, excepté la première"
# Append all values of array $2 to $1, except the first one.
function base_array_extend_lasts() {
    eval "$1=(\"\${$1[@]}\" \"\${$2[@]:1}\")"
}
+
function: base_array_xsplit "créer le tableau \$1 avec chaque élément de \$2 (un ensemble d'éléments séparés par \$3, qui vaut ':' par défaut)"
# Split string $2 on separator $3 (default ':') into array $1.
# The awk program single-quotes every record (escaping embedded ') so the
# generated word list can be rebuilt safely by the surrounding eval.
# NOTE(review): relies on recho_ (base.core) and lawk (gawk with the
# nulib awk library 'base') defined elsewhere.
function base_array_xsplit() {
    eval "$1=($(recho_ "$2" | lawk -v RS="${3:-:}" '
{
    gsub(/'\''/, "'\'\\\\\'\''")
    print "'\''" $0 "'\''"
}'))" #"
}

function: base_array_xsplitc "variante de base_array_xsplit() où le séparateur est ',' par défaut"
# Same as base_array_xsplit() with ',' as the default separator.
function base_array_xsplitc() {
    base_array_xsplit "$1" "$2" "${3:-,}"
}

function: base_array_split "créer le tableau \$1 avec chaque élément de \$2 (un ensemble d'éléments séparés par \$3, qui vaut ':' par défaut)

Les éléments vides sont ignorés. par exemple \"a::b\" est équivalent à \"a:b\""
# Like base_array_xsplit(), but empty fields are skipped (/^$/ { next }).
function base_array_split() {
    eval "$1=($(recho_ "$2" | lawk -v RS="${3:-:}" '
/^$/ { next }
{
    gsub(/'\''/, "'\'\\\\\'\''")
    print "'\''" $0 "'\''"
}'))" #"
}

function: base_array_splitc "variante de base_array_split() où le séparateur est ',' par défaut"
# Same as base_array_split() with ',' as the default separator.
function base_array_splitc() {
    base_array_split "$1" "$2" "${3:-,}"
}

function: base_array_xsplitl "créer le tableau \$1 avec chaque ligne de \$2"
# Split string $2 into array $1, one element per line; strnl2lf (defined
# elsewhere) first normalizes line endings to LF.
function base_array_xsplitl() {
    eval "$1=($(recho_ "$2" | strnl2lf | lawk '
{
    gsub(/'\''/, "'\'\\\\\'\''")
    print "'\''" $0 "'\''"
}'))" #"
}

function: base_array_splitl "créer le tableau \$1 avec chaque ligne de \$2

Les lignes vides sont ignorés."
# Like base_array_xsplitl(), but empty lines are skipped.
function base_array_splitl() {
    eval "$1=($(recho_ "$2" | strnl2lf | lawk '
/^$/ { next }
{
    gsub(/'\''/, "'\'\\\\\'\''")
    print "'\''" $0 "'\''"
}'))" #"
}
+
function: base_array_join "afficher le contenu du tableau \$1 sous forme d'une liste de valeurs séparées par \$2 (qui vaut ':' par défaut)

* Si \$1==\"@\", alors les éléments du tableaux sont les arguments de la fonction à partir de \$3
* Si \$1!=\"@\" et que le tableau est vide, afficher \$3
* Si \$1!=\"@\", \$4 et \$5 sont des préfixes et suffixes à rajouter à chaque élément"
# Join the values of array $1 with separator $2 (default ':').
# With $1=="@" the values are the remaining positional arguments; otherwise
# $3 is a fallback printed when the array is empty, and $4/$5 are a prefix
# and suffix wrapped around each element.
function base_array_join() {
    local __aj_an __aj_l __aj_j __aj_s="${2:-:}" __aj_pf __aj_sf
    if [ "$1" == "@" ]; then
        # iterate over the positional arguments from $3 on
        __aj_an="\$@"
        shift; shift
    else
        # iterate over the named array, wrapping each element
        __aj_an="\${$1[@]}"
        __aj_pf="$4"
        __aj_sf="$5"
    fi
    # NOTE(review): both the expansion text and the separator are spliced
    # directly into the eval'd template — a separator containing quotes
    # would break the expansion; confirm callers only pass plain separators.
    eval '
for __aj_l in "'"$__aj_an"'"; do
    __aj_j="${__aj_j:+$__aj_j'"$__aj_s"'}$__aj_pf$__aj_l$__aj_sf"
done'
    if [ -n "$__aj_j" ]; then
        recho "$__aj_j"
    elif [ "$__aj_an" != "\$@" -a -n "$3" ]; then
        # empty named array: print the fallback value $3
        recho "$3"
    fi
}

function: base_array_joinc "afficher les éléments du tableau \$1 séparés par ','"
# Convenience wrapper: join with ','.
function base_array_joinc() {
    base_array_join "$1" , "$2" "$3" "$4"
}

function: base_array_joinl "afficher les éléments du tableau \$1 à raison d'un élément par ligne"
# Convenience wrapper: join with a newline (one element per line).
function base_array_joinl() {
    base_array_join "$1" "
" "$2" "$3" "$4"
}
+
+function: base_array_mapjoin "map le tableau \$1 avec la fonction \$2, puis afficher le résultat en séparant chaque élément par \$3
+
+Les arguments et la sémantique sont les mêmes que pour base_array_join() en
+tenant compte de l'argument supplémentaire \$2 qui est la fonction pour
+base_array_map() (les autres arguments sont décalés en conséquence)"
# Map array $1 through function $2, then join the results with separator $3;
# remaining arguments follow base_array_join() semantics.
function base_array_mapjoin() {
    local _mj_src="$1" _mj_func="$2" _mj_sep="$3"
    shift; shift; shift
    if [ "$_mj_src" == "@" ]; then
        # values come from the command line: materialize them first
        local -a _mj_inline
        _mj_inline=("$@")
        _mj_src=_mj_inline
        set --
    fi
    local -a _mj_work
    base_array_copy _mj_work "$_mj_src"
    base_array_map _mj_work "$_mj_func"
    base_array_join _mj_work "$_mj_sep" "$@"
}
+
+function: base_array_fix_paths "Corriger les valeurs du tableau \$1. Les valeurs contenant le séparateur \$2(=':') sont séparées en plusieurs valeurs.
+
+Par exemple avec le tableau input=(a b:c), le résultat est input=(a b c)"
# Normalize array $1: any value containing separator $2(=':') is split into
# several values, e.g. (a b:c) becomes (a b c).
function base_array_fix_paths() {
    local _fp_name="$1" _fp_sep="${2:-:}"
    local -a _fp_old
    local _fp_item
    base_array_copy _fp_old "$_fp_name"
    base_array_new "$_fp_name"
    for _fp_item in "${_fp_old[@]}"; do
        # split each value on the separator, then append the pieces
        base_array_split _fp_item "$_fp_item" "$_fp_sep"
        base_array_extend "$_fp_name" _fp_item
    done
}
diff --git a/lib/nulib/bash/base.core b/lib/nulib/bash/base.core
new file mode 100644
index 0000000..ef130f6
--- /dev/null
+++ b/lib/nulib/bash/base.core
@@ -0,0 +1,458 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.core base_ "Fonctions de base: fondement"
+
function: echo_ "afficher la valeur \$* sans passer à la ligne"
# Print "$*" without a trailing newline.
function echo_() { echo -n "$*"; }

function: recho "afficher une valeur brute.

contrairement à la commande echo, ne reconnaitre aucune option (i.e. -e, -E, -n
ne sont pas signifiants)"
# Echo-like output that never treats a leading -e/-E/-n as an option.
function recho() {
    case "${1:0:2}" in
    -e|-E|-n)
        # print the dash alone so echo cannot parse the rest as an option
        local _r_rest="${1:1}"; shift
        echo -n -
        echo "$_r_rest" "$@"
        ;;
    *)
        echo "$@"
        ;;
    esac
}

function: recho_ "afficher une valeur brute, sans passer à la ligne.

contrairement à la commande echo, ne reconnaitre aucune option (i.e. -e, -E, -n
ne sont pas signifiants)"
# Same as recho(), but without the trailing newline.
function recho_() {
    case "${1:0:2}" in
    -e|-E|-n)
        local _r_rest="${1:1}"; shift
        echo -n -
        echo -n "$_r_rest" "$@"
        ;;
    *)
        echo -n "$@"
        ;;
    esac
}
+
+function: _qval "Dans la chaine \$*, remplacer:
+~~~
+\\ par \\\\
+\" par \\\"
+\$ par \\\$
+\` par \\\`
+~~~
+
+Cela permet de quoter une chaine à mettre entre guillements.
+
+note: la protection de ! n'est pas effectuée, parce que le comportement du shell
+est incohérent entre le shell interactif et les scripts. Pour une version plus
+robuste, il est nécessaire d'utiliser un programme externe tel que sed ou awk"
# Escape \, ", $ and ` in $* so the result is safe inside double quotes;
# printed without a trailing newline. Backslash must be escaped first.
function _qval() {
    local _q_s="$*"
    _q_s="${_q_s//\\/\\\\}"
    _q_s="${_q_s//\"/\\\"}"
    _q_s="${_q_s//\$/\\\$}"
    _q_s="${_q_s//\`/\\\`}"
    printf '%s' "$_q_s"
}
+
+function: base_should_quote "Tester si la chaine \$* doit être mise entre quotes"
# Decide whether $* needs quoting: always quote several/empty/long (>80)
# values; otherwise quote when characters outside the safe set remain.
function base_should_quote() {
    [ $# -ne 1 ] && return 0
    [ ${#1} -eq 0 -o ${#1} -gt 80 ] && return 0
    # delete every "safe" character; anything left forces quoting
    local _sq_rest="$*"
    _sq_rest="${_sq_rest//[a-zA-Z0-9]/}"
    _sq_rest="${_sq_rest//[,.+\/_=-]/}"
    [ -n "$_sq_rest" ]
}
+
+function: qval "Afficher la chaine \$* quotée avec \""
# Print $* between double quotes, escaping \, ", $ and ` (inline _qval).
function qval() {
    local _qv_s="$*"
    _qv_s="${_qv_s//\\/\\\\}"
    _qv_s="${_qv_s//\"/\\\"}"
    _qv_s="${_qv_s//\$/\\\$}"
    _qv_s="${_qv_s//\`/\\\`}"
    printf '"%s"\n' "$_qv_s"
}
+
+function: qvalm "Afficher la chaine \$* quotée si nécessaire avec \""
# Print $* quoted with " only when base_should_quote() says it is needed.
function qvalm() {
    if ! base_should_quote "$@"; then
        recho "$@"
    else
        echo -n \"
        _qval "$@"
        echo \"
    fi
}
+
+function: qvalr "Afficher la chaine \$* quotée si nécessaire avec \", sauf si elle est vide"
# Like qvalm(), but print nothing at all for an empty value.
function qvalr() {
    if [ -z "$*" ]; then
        return 0
    fi
    if base_should_quote "$@"; then
        echo -n \"
        _qval "$@"
        echo \"
    else
        recho "$@"
    fi
}
+
+function: qvals "Afficher chaque argument de cette fonction quotée le cas échéant avec \", chaque valeur étant séparée par un espace"
# Print every argument, each quoted when needed, separated by spaces;
# a trailing newline is emitted only when at least one argument was given.
function qvals() {
    local _qs_item _qs_sep=
    for _qs_item in "$@"; do
        echo -n "$_qs_sep"
        if base_should_quote "$_qs_item"; then
            echo -n \"
            _qval "$_qs_item"
            echo -n \"
        else
            recho_ "$_qs_item"
        fi
        _qs_sep=" "
    done
    [ -n "$_qs_sep" ] && echo
}
+
+function: qwc "Dans la chaine \$*, remplacer:
+~~~
+ \\ par \\\\
+\" par \\\"
+\$ par \\\$
+\` par \\\`
+~~~
+puis quoter la chaine avec \", sauf les wildcards *, ? et [class]
+
+Cela permet de quoter une chaine permettant de glober des fichiers, e.g
+~~~
+eval \"ls \$(qwc \"\$value\")\"
+~~~
+
+note: la protection de ! n'est pas effectuée, parce que le comportement du shell
+est incohérent entre le shell interactif et les scripts. Pour une version plus
+robuste, il est nécessaire d'utiliser un programme externe tel que sed ou awk"
function qwc() {
    # First protect \, ", $ and ` as for a double-quoted string.
    local s="$*"
    s="${s//\\/\\\\}"
    s="${s//\"/\\\"}"
    s="${s//\$/\\\$}"
    s="${s//\`/\\\`}"
    # r accumulates the quoted result; a/b/c hold the offset of the first
    # *, ? and [ still present in $s (empty when absent).
    local r a b c
    while [ -n "$s" ]; do
        a=; b=; c=
        a=; [[ "$s" == *\** ]] && { a="${s%%\**}"; a=${#a}; }
        b=; [[ "$s" == *\?* ]] && { b="${s%%\?*}"; b=${#b}; }
        c=; [[ "$s" == *\[* ]] && { c="${s%%\[*}"; c=${#c}; }
        # no wildcard left: quote the remainder and stop
        if [ -z "$a" -a -z "$b" -a -z "$c" ]; then
            r="$r\"$s\""
            break
        fi
        # keep only the earliest of the three wildcards
        if [ -n "$a" ]; then
            [ -n "$b" ] && [ $a -lt $b ] && b=
            [ -n "$c" ] && [ $a -lt $c ] && c=
        fi
        if [ -n "$b" ]; then
            [ -n "$a" ] && [ $b -lt $a ] && a=
            [ -n "$c" ] && [ $b -lt $c ] && c=
        fi
        if [ -n "$c" ]; then
            [ -n "$a" ] && [ $c -lt $a ] && a=
            [ -n "$b" ] && [ $c -lt $b ] && b=
        fi
        # emit the quoted prefix, then the wildcard itself unquoted
        if [ -n "$a" ]; then # PREFIX*
            a="${s%%\**}"
            s="${s#*\*}"
            [ -n "$a" ] && r="$r\"$a\""
            r="$r*"
        elif [ -n "$b" ]; then # PREFIX?
            a="${s%%\?*}"
            s="${s#*\?}"
            [ -n "$a" ] && r="$r\"$a\""
            r="$r?"
        elif [ -n "$c" ]; then # PREFIX[class]
            a="${s%%\[*}"
            b="${s#*\[}"; b="${b%%\]*}"
            # skip past "PREFIX[class]" (the +2 accounts for [ and ])
            s="${s:$((${#a} + ${#b} + 2))}"
            [ -n "$a" ] && r="$r\"$a\""
            r="$r[$b]"
        fi
    done
    recho_ "$r"
}
+
+function: qlines "Traiter chaque ligne de l'entrée standard pour en faire des chaines quotées avec '"
# Turn each stdin line into a single-quoted string: embedded ' becomes
# '\'' and the whole line is wrapped in single quotes.
function qlines() {
    sed -e "s/'/'\\\\''/g" -e "s/.*/'&'/g"
}
+
+function: setv "initialiser la variable \$1 avec la valeur \$2..*
+
+note: en principe, la syntaxe est 'setv var values...'. cependant, la syntaxe 'setv var=values...' est supportée aussi"
# Assign the value $2..* to the variable named $1.
# Both "setv var values..." and "setv var=values..." are accepted.
function setv() {
    local _sv_name="$1"; shift
    if [[ "$_sv_name" == *=* ]]; then
        # split the "var=value" spelling back into name + first value
        set -- "${_sv_name#*=}" "$@"
        _sv_name="${_sv_name%%=*}"
    fi
    eval "$_sv_name=\"\$*\""
}
+
+function: _setv "Comme la fonction setv() mais ne supporte que la syntaxe '_setv var values...'
+
+Cette fonction est légèrement plus rapide que setv()"
# Fast path of setv(): only the "_setv var values..." spelling is accepted.
function _setv() {
    local _sv_name="$1"; shift
    eval "$_sv_name=\"\$*\""
}
+
+function: echo_setv "Afficher la commande qui serait lancée par setv \"\$@\""
# Print the assignment that setv "$@" would perform, value quoted by qvalr.
function echo_setv() {
    local _es_name="$1"; shift
    if [[ "$_es_name" == *=* ]]; then
        set -- "${_es_name#*=}" "$@"
        _es_name="${_es_name%%=*}"
    fi
    echo "$_es_name=$(qvalr "$*")"
}
+
+function: echo_setv2 "Afficher la commande qui recrée la variable \$1.
+
+Equivalent à
+~~~
+echo_setv \"\$1=\${!1}\"
+~~~
+
+Si d'autres arguments que le nom de la variable sont spécifiés, cette fonction
+se comporte comme echo_setv()"
# Print the assignment that recreates variable $1 (its current value is
# used when no other argument is given); otherwise behave like echo_setv().
function echo_setv2() {
    local _e2_name="$1"; shift
    if [[ "$_e2_name" == *=* ]]; then
        set -- "${_e2_name#*=}" "$@"
        _e2_name="${_e2_name%%=*}"
    fi
    if [ $# -eq 0 ]; then
        echo_setv "$_e2_name" "${!_e2_name}"
    else
        echo_setv "$_e2_name" "$@"
    fi
}
+
+function: seta "initialiser le tableau \$1 avec les valeurs \$2..@
+
+note: en principe, la syntaxe est 'seta array values...'. cependant, la syntaxe
+'seta array=values...' est supportée aussi"
# Assign the values $2..@ to the array named $1.
# Both "seta array values..." and "seta array=values..." are accepted.
function seta() {
    local _sa_name="$1"; shift
    if [[ "$_sa_name" == *=* ]]; then
        # split the "array=value" spelling back into name + first value
        set -- "${_sa_name#*=}" "$@"
        _sa_name="${_sa_name%%=*}"
    fi
    eval "$_sa_name=(\"\$@\")"
}
+
+function: _seta "Comme la fonction seta() mais ne supporte que la syntaxe '_seta array values...'
+
+Cette fonction est légèrement plus rapide que seta()"
# Fast path of seta(): only the "_seta array values..." spelling is accepted.
function _seta() {
    local _sa_name="$1"; shift
    eval "$_sa_name=(\"\$@\")"
}
+
+function: echo_seta "Afficher la commande qui serait lancée par seta \"\$@\""
# Print the assignment that seta "$@" would perform, values quoted by qvals.
function echo_seta() {
    local _ea_name="$1"; shift
    if [[ "$_ea_name" == *=* ]]; then
        set -- "${_ea_name#*=}" "$@"
        _ea_name="${_ea_name%%=*}"
    fi
    echo "$_ea_name=($(qvals "$@"))"
}
+
+function: echo_seta2 "Afficher la commande qui recrée le tableau \$1
+
+Si d'autres arguments que le nom de tableau sont spécifiés, cette fonction se
+comporte comme echo_seta()"
# Print the assignment that recreates array $1 (its current content is used
# when no other argument is given); otherwise behave like echo_seta().
function echo_seta2() {
    local _e2_name="$1"; shift
    if [[ "$_e2_name" == *=* ]]; then
        set -- "${_e2_name#*=}" "$@"
        _e2_name="${_e2_name%%=*}"
    elif [ $# -eq 0 ]; then
        # load the array's current elements as positional arguments
        eval "set -- \"\${$_e2_name[@]}\""
    fi
    echo "$_e2_name=($(qvals "$@"))"
}
+
+function: setx "Initialiser une variable avec le résultat d'une commande
+
+* syntaxe 1: initialiser la variable \$1 avec le résultat de la commande \"\$2..@\"
+ ~~~
+ setx var cmd
+ ~~~
+ note: en principe, la syntaxe est 'setx var cmd args...'. cependant, la syntaxe
+ 'setx var=cmd args...' est supportée aussi
+
+* syntaxe 2: initialiser le tableau \$1 avec le résultat de la commande
+ \"\$2..@\", chaque ligne du résultat étant un élément du tableau
+ ~~~
+ setx -a array cmd
+ ~~~
+ note: en principe, la syntaxe est 'setx -a array cmd args...'. cependant, la
+ syntaxe 'setx -a array=cmd args...' est supportée aussi"
function setx() {
    if [ "$1" == -a ]; then
        # array mode: each line of the command's output becomes one element
        shift
        local s__array="$1"; shift
        if [[ "$s__array" == *=* ]]; then
            # support the alternate "array=cmd args..." spelling
            set -- "${s__array#*=}" "$@"
            s__array="${s__array%%=*}"
        fi
        # qlines single-quotes each output line so eval rebuilds them safely
        eval "$s__array=($("$@" | qlines))"
    else
        # scalar mode: capture the whole output into the variable
        local s__var="$1"; shift
        if [[ "$s__var" == *=* ]]; then
            set -- "${s__var#*=}" "$@"
            s__var="${s__var%%=*}"
        fi
        eval "$s__var="'"$("$@")"'
    fi
}
+
+function: _setvx "Comme la fonction setx() mais ne supporte que l'initialisation d'une variable scalaire avec la syntaxe '_setvx var cmd args...' pour gagner (un peu) en rapidité d'exécution."
# Fast scalar-only variant of setx(): "_setvx var cmd args...".
function _setvx() {
    local _sx_name="$1"; shift
    eval "$_sx_name="'"$("$@")"'
}
+
+function: _setax "Comme la fonction setx() mais ne supporte que l'initialisation d'un tableau avec la syntaxe '_setax array cmd args...' pour gagner (un peu) en rapidité d'exécution."
# Fast array-only variant of setx(): "_setax array cmd args...";
# each output line (quoted by qlines) becomes one element.
function _setax() {
    local _sa_name="$1"; shift
    eval "$_sa_name=($("$@" | qlines))"
}
+
+function: base_is_defined "tester si la variable \$1 est définie"
# Succeed when the variable named $1 is defined.
function base_is_defined() {
    declare -p "$1" &>/dev/null
}
+
+function: base_is_array "tester si la variable \$1 est un tableau"
+function base_is_array() {
+ [[ "$(declare -p "$1" 2>/dev/null)" =~ declare\ -[^\ ]*a[^\ ]*\ ]]
+}
+
+function: base_array_local "afficher les commandes pour faire une copie dans la variable locale \$1 du tableau \$2"
# Print the commands creating a local array $1 copied from array $2;
# when both names are equal, reuse the current declaration if any.
function base_array_local() {
    if [ "$1" != "$2" ]; then
        echo "local -a $1; $1=(\"\${$2[@]}\")"
    else
        declare -p "$1" 2>/dev/null || echo "local -a $1"
    fi
}
+
+function: base_upvar "Implémentation de upvar() de http://www.fvue.nl/wiki/Bash:_Passing_variables_by_reference
+
+USAGE
+~~~
+local varname && base_upvar varname values...
+~~~
+* @param varname Variable name to assign value to
+* @param values Value(s) to assign. If multiple values (> 1), an array is
+ assigned, otherwise a single value is assigned."
# Assign values to the caller's variable $1 (upvar pattern): a single value
# yields a scalar, several values yield an array. The caller must have
# declared "local varname" first (see the function: doc above).
function base_upvar() {
    # unset first so the assignment lands in the caller's scope
    if unset -v "$1"; then
        # BUGFIX: the original tested [ $# -lt 2 ], so the documented
        # single-value case (varname + 1 value, i.e. $# == 2) fell through
        # to the array branch and produced a one-element array instead of
        # a scalar. Use $# -lt 3 so exactly one value assigns a scalar.
        if [ $# -lt 3 ]; then
            eval "$1=\"\$2\""
        else
            eval "$1=(\"\${@:2}\")"
        fi
    fi
}
+
+function: base_array_upvar "Comme base_upvar() mais force la création d'un tableau, même s'il y a que 0 ou 1 argument"
# Like base_upvar(), but always create an array, even with 0 or 1 value.
function base_array_upvar() {
    unset -v "$1" || return
    eval "$1=(\"\${@:2}\")"
}
+
+function: base_upvars "Implémentation modifiée de upvars() de http://www.fvue.nl/wiki/Bash:_Passing_variables_by_reference
+
+Par rapport à l'original, il n'est plus nécessaire de préfixer une variable
+scalaire avec -v, et -a peut être spécifié sans argument.
+
+USAGE
+~~~
+local varnames... && base_upvars [varname value | -aN varname values...]...
+~~~
+* @param -a assigns remaining values to varname as array
+* @param -aN assigns next N values to varname as array. Returns 1 if wrong
+ number of options occurs"
# Assign several caller variables at once: "name value" pairs assign
# scalars, -aN consumes the next N values into an array, and a bare -a
# consumes everything remaining (see the function: doc above).
function base_upvars() {
    while [ $# -gt 0 ]; do
        case "$1" in
        -a)
            # -a without a count: all remaining values go into $2
            unset -v "$2" && eval "$2=(\"\${@:3}\")"
            break
            ;;
        -a*)
            # -aN: the next N values go into $2
            local _uv_n="${1#-a}"
            unset -v "$2" && eval "$2=(\"\${@:3:$_uv_n}\")"
            shift $(($_uv_n + 2)) || return 1
            ;;
        *)
            # plain "name value" pair
            unset -v "$1" && eval "$1=\"\$2\""
            shift; shift
            ;;
        esac
    done
}
+
+function: base_set_debug "Passer en mode DEBUG"
# Turn on DEBUG mode for this process and its children.
function base_set_debug() {
    NULIB_DEBUG=1
    export NULIB_DEBUG
}
+
+function: base_is_debug "Tester si on est en mode DEBUG"
# Succeed when DEBUG mode is active (NULIB_DEBUG non-empty).
function base_is_debug() {
    test -n "$NULIB_DEBUG"
}
+
function: lawk "Lancer GNUawk avec la librairie 'base'"
# Run GNU awk with nulib's awk library 'base' preloaded (gawk -i base).
function lawk() {
    gawk -i base "$@"
}

function: cawk "Lancer GNUawk avec LANG=C et la librairie 'base'

Le fait de forcer la valeur de LANG permet d'éviter les problèmes avec la locale"
# Same as lawk() but with LANG=C to avoid locale-dependent behaviour.
function cawk() {
    LANG=C gawk -i base "$@"
}

function: lsort "Lancer sort avec support de la locale courante"
function: csort "Lancer sort avec LANG=C pour désactiver le support de la locale

Avec LANG!=C, sort utilise les règles de la locale pour le tri, et par
exemple, avec LANG=fr_FR.UTF-8, la locale indique que les ponctuations doivent
être ignorées."
# Locale-aware (l*) vs byte-wise LANG=C (c*) variants of sort.
function lsort() { sort "$@"; }
function csort() { LANG=C sort "$@"; }

function: lgrep "Lancer grep avec support de la locale courante"
function: cgrep "Lancer grep avec LANG=C pour désactiver le support de la locale"
# Locale-aware vs LANG=C variants of grep.
function lgrep() { grep "$@"; }
function cgrep() { LANG=C grep "$@"; }

function: lsed "Lancer sed avec support de la locale courante"
function: csed "Lancer sed avec LANG=C pour désactiver le support de la locale"
# Locale-aware vs LANG=C variants of sed.
function lsed() { sed "$@"; }
function csed() { LANG=C sed "$@"; }

function: ldiff "Lancer diff avec support de la locale courante"
function: cdiff "Lancer diff avec LANG=C pour désactiver le support de la locale"
# Locale-aware vs LANG=C variants of diff.
function ldiff() { diff "$@"; }
function cdiff() { LANG=C diff "$@"; }
diff --git a/lib/nulib/bash/base.eval b/lib/nulib/bash/base.eval
new file mode 100644
index 0000000..d443a2d
--- /dev/null
+++ b/lib/nulib/bash/base.eval
@@ -0,0 +1,468 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.eval base_ "Fonctions de base: évaluation d'expressions"
+require: base.str base.arr
+
+################################################################################
+# Chaines
+
+function: base_evals "Appliquer à une chaine de caractères une suite de traitements, e.g:
+~~~
+base_evals var deref +suffix
+~~~
+est équivalent à
+~~~
+echo \"\${var}suffix\"
+~~~
+
+En commençant avec la valeur initiale \$1, les arguments \$2..* sont des
+opérations à appliquer dans l'ordre.
+
+Les opérations suivantes considèrent que la valeur courante est un nom de
+variable:
+~~~
+:- := :? :+ deref dcount
+~~~
+
+Toutes les autres opérations travaillent directement avec la valeur
+courante. Les opérations suivantes appliquent une transformation:
+~~~
+# % / : ^ , +# -# +% -% + - mid repl
+~~~
+IMPORTANT: aucune de ces fonctions ne met en échappement les valeur des
+patterns. Ainsi, si un pattern contient des caractères interdits comme \\ ou \$,
+il faut d'abord le traiter avec _qval()
+
+Les opérations suivantes font un test sur la valeur et retournent immédiatement:
+~~~
+= == != < > -eq -ne -lt -le -gt -ge -n -z
+~~~
+
+La syntaxe des opérateurs standards de bash est reprise autant que possible, i.e
+si on a l'habitude d'écrire ${varOP} en bash, alors la syntaxe à utiliser à
+priori est 'base_evals var OP' ou 'base_evals var deref OP' suivant les
+opérateurs.
+
+Autres opérateurs:
+~~~
+deref indirection
+dcount nombre d'éléments du tableau
++#STR ajouter un préfixe
+-#STR supprimer un préfixe
++%STR ou +STR ajouter un suffixe
+-%STR ou -STR supprimer un suffixe
+mid RANGE traiter la chaine avec base_strmid()
+repl FROM TO traiter la chaine avec base_strrepl()
+~~~
+
+Tout autre opérateur est traité comme un appel à une fonction qui prend un seul
+argument, la valeur courante, et qui affiche le résultat."
function base_evals() {
    local -a es__tmp
    local es__value="$1"; shift
    while [ $# -gt 0 ]; do
        case "$1" in
        # these operators treat the current value as a VARIABLE NAME
        :-*|:=*|:\?*|:+*) eval 'es__value="${'"${es__value}$1"'}"';;
        d|deref) es__value="${!es__value}";;
        dc|dcount|ds|dsize)
            # expand NAME[@] indirectly to count the array's elements
            es__value="${es__value}[@]"
            es__tmp=("${!es__value}")
            es__value="${#es__tmp[@]}"
            ;;
        # from here on the current value is used DIRECTLY
        \#*|%*|/*|:*|^*|,*) eval 'es__value="${es__value'"$1"'}"';;
        l|length) es__value="${#es__value}";;
        =|==|!=|\<|\>|-eq|-ne|-lt|-le|-gt|-ge)
            # comparison: execute [ value OP args ] and return its status
            es__tmp=(\[ "$es__value" "$@" ]); "${es__tmp[@]}"; return $?;;
        -n|-z) es__tmp=(\[ "$1" "$es__value" ]); "${es__tmp[@]}"; return $?;;
        # +#STR / -#STR: add / strip a prefix; +%STR (+STR) / -%STR (-STR):
        # add / strip a suffix. NOTE(review): operand text is spliced
        # unescaped into the eval — callers must pre-quote with _qval().
        +#*) eval 'es__value="'"${1#+#}"'$es__value"';;
        -#*) eval 'es__value="${es__value'"${1#-}"'}"';;
        +%*) eval 'es__value="$es__value"'"${1#+%}";;
        +*) eval 'es__value="$es__value"'"${1#+}";;
        -%*) eval 'es__value="${es__value'"${1#-}"'}"';;
        -*) eval 'es__value="${es__value%'"${1#-}"'}"';;
        mid|strmid|base_strmid) eval 'es__value="$(base_strmid "$2" "$es__value")"'; shift;;
        repl|strrepl|base_strrepl) eval 'es__value="$(base_strrepl "$2" "$3" "$es__value")"'; shift; shift;;
        # anything else: a filter function that prints the new value
        *) es__value="$("$1" "$es__value")";;
        esac
        shift
    done
    echo "$es__value"
}
+
+function: base_setxs "équivalent à setx \$1 evals \$2..@"
# Equivalent to: setx [-a] $1 base_evals $2..@
function base_setxs() {
    local -a _ss_pre
    [ "$1" == -a ] && { _ss_pre=(-a); shift; }
    local _ss_name="$1"; shift
    # support the alternate "var=args..." spelling
    if [[ "$_ss_name" == *=* ]]; then
        set -- "${_ss_name#*=}" "$@"
        _ss_name="${_ss_name%%=*}"
    fi
    setx "${_ss_pre[@]}" "$_ss_name" base_evals "$@"
}
+
+function: base_cmds "lancer une commande avec comme argument le résultat de evals
+
+Par exemple, les deux commandes suivantes sont équivalentes:
+~~~
+base_cmds CMD ARGS... // EVALARGS
+CMD ARGS... \"\$(evals EVALARGS)\"
+~~~"
# Run "CMD ARGS..." (everything before //) with the result of
# base_evals(everything after //) appended as a single extra argument.
function base_cmds() {
    local -a _cs_cmd
    while [ $# -gt 0 ]; do
        [ "$1" == // ] && { shift; break; }
        _cs_cmd=("${_cs_cmd[@]}" "$1")
        shift
    done
    "${_cs_cmd[@]}" "$(base_evals "$@")"
}
+
+function: base_evalm "construire une chaine en mixant chaines statiques et évaluations de commandes
+
+Par exemple, les deux commandes suivantes sont équivalentes:
+~~~
+evalm //\"string\" cmd args // cmd args //\"string\"
+echo \"string\$(cmd args)\$(cmd args)string\"
+~~~"
function base_evalm() {
    local em__val em__arg
    local -a em__cmd
    while [ $# -gt 0 ]; do
        em__arg="$1"
        if [ "${em__arg#//}" != "$em__arg" ]; then
            # //"string" argument: append the literal text after //
            em__val="$em__val${em__arg#//}"
            shift
            continue
        fi
        # otherwise accumulate a command, up to the next // marker
        em__cmd=()
        while [ $# -gt 0 ]; do
            em__arg="$1"
            [ "${em__arg#//}" != "$em__arg" ] && break
            shift
            if [ "${em__arg%//}" != "$em__arg" ]; then
                local em__tmp="${em__arg%//}"
                if [ -z "${em__tmp//\\/}" ]; then
                    # argument made only of backslashes ending in //:
                    # strip one backslash (escape for a literal //)
                    em__arg="${em__arg#\\}"
                    em__cmd=("${em__cmd[@]}" "$em__arg")
                    continue
                fi
            fi
            em__cmd=("${em__cmd[@]}" "$em__arg")
        done
        # append the command's output to the value being built
        [ ${#em__cmd[*]} -gt 0 ] && em__val="$em__val$("${em__cmd[@]}")"
    done
    echo "$em__val"
}
+
+function: base_setxm "équivalent à setx \$1 evalm \$2..@"
# Equivalent to: setx [-a] $1 base_evalm $2..@
function base_setxm() {
    local -a _sm_pre
    [ "$1" == -a ] && { _sm_pre=(-a); shift; }
    local _sm_name="$1"; shift
    # support the alternate "var=args..." spelling
    if [[ "$_sm_name" == *=* ]]; then
        set -- "${_sm_name#*=}" "$@"
        _sm_name="${_sm_name%%=*}"
    fi
    setx "${_sm_pre[@]}" "$_sm_name" base_evalm "$@"
}
+
+function: base_cmdm "lancer une commande avec comme argument le résultat de evalm
+
+Par exemple, les deux commandes suivantes sont équivalentes:
+~~~
+base_cmdm CMD ARGS... // EVALARGS
+CMD ARGS... \"\$(evalm EVALARGS)\"
+~~~"
# Run "CMD ARGS..." (everything before //) with the result of
# base_evalm(everything after //) appended as a single extra argument.
function base_cmdm() {
    local -a _cm_cmd
    while [ $# -gt 0 ]; do
        [ "$1" == // ] && { shift; break; }
        _cm_cmd=("${_cm_cmd[@]}" "$1")
        shift
    done
    "${_cm_cmd[@]}" "$(base_evalm "$@")"
}
+
+################################################################################
+# Nombres
+
+function: base_evali "Evaluer une expression numérique"
# Evaluate $* as a bash arithmetic expression and print the result.
function base_evali() {
    printf '%s\n' "$(($*))"
}
+
+################################################################################
+# Tableaux
+
+################################################################################
+# Composition
+
+function: base_evalc "Implémenter une syntaxe lisible et naturelle permettant d'enchainer des traitements sur une valeur.
+
+Par exemple, la commande
+~~~
+evalc cmd1... // cmd2... // cmd3...
+~~~
+est équivalente à la commande
+~~~
+cmd3... \"\$(cmd2... \"\$(cmd1...)\")\"
+~~~"
function base_evalc() {
    local ec__arg ec__cmd ec__finalcmd

    while [ $# -gt 0 ]; do
        ec__arg="$1"; shift
        if [ "$ec__arg" == // ]; then
            # close the current command: it becomes the outer call,
            # wrapping what has been composed so far in $(...)
            if [ ${#ec__cmd} -gt 0 ]; then
                if [ ${#ec__finalcmd} -eq 0 ]; then ec__finalcmd="$ec__cmd"
                else ec__finalcmd="$ec__cmd \$($ec__finalcmd)"
                fi
            fi
            ec__cmd=
            continue
        elif [ "${ec__arg%//}" != "$ec__arg" ]; then
            # backslashes ending in //: strip one escaping backslash
            local tmp="${ec__arg%//}"
            [ -z "${tmp//\\/}" ] && ec__arg="${ec__arg#\\}"
        fi
        # append the argument, quoted by _qval, to the command being built
        ec__cmd="$ec__cmd \"$(_qval "$ec__arg")\""
    done
    # fold in the last command, then evaluate the composed expression
    if [ ${#ec__cmd} -gt 0 ]; then
        if [ ${#ec__finalcmd} -eq 0 ]; then ec__finalcmd="$ec__cmd"
        else ec__finalcmd="$ec__cmd \$($ec__finalcmd)"
        fi
    fi
    eval "$ec__finalcmd"
}
+
+function: base_setxc "équivalent à setx \$1 evalc \$2..@"
# Equivalent to: setx [-a] $1 base_evalc $2..@
function base_setxc() {
    local -a _sc_pre
    [ "$1" == -a ] && { _sc_pre=(-a); shift; }
    local _sc_name="$1"; shift
    # support the alternate "var=args..." spelling
    if [[ "$_sc_name" == *=* ]]; then
        set -- "${_sc_name#*=}" "$@"
        _sc_name="${_sc_name%%=*}"
    fi
    setx "${_sc_pre[@]}" "$_sc_name" base_evalc "$@"
}
+
+################################################################################
+# Chainage
+
+function: base_evalp "Implémenter une syntaxe alternative permettant d'enchainer des traitements sur un flux de données.
+
+Par exemple, la commande
+~~~
+evalp cmd1... // cmd2... // cmd3...
+~~~
+affiche le résultat de la commande
+~~~
+cmd1... | cmd2... | cmd3...
+~~~
+
+Typiquement, cette fonction permet de faciliter la *construction* d'un
+enchainement de commandes par programme, ou de faciliter l'utilisation de la
+fonction setx() pour récupérer le résultat d'un enchainement. Dans les autres
+cas, il est plus simple et naturel d'écrire les enchainements avec la syntaxe de
+bash."
function base_evalp() {
    # Build the pipeline 'cmd1 | cmd2 | ...' from arguments separated by
    # '//', then eval it: 'evalp c1 // c2 // c3' runs c1 | c2 | c3.
    local ep__arg ep__cmd

    while [ $# -gt 0 ]; do
        ep__arg="$1"; shift
        if [ "$ep__arg" == // ]; then
            # separator: start a new pipeline stage
            ep__cmd="$ep__cmd |"
            continue
        elif [ "${ep__arg%//}" != "$ep__arg" ]; then
            # backslashes + '//' is an escaped separator: unescape it and
            # treat it as a normal argument
            local ep__tmp="${ep__arg%//}"
            if [ -z "${ep__tmp//\\/}" ]; then
                ep__arg="${ep__arg#\\}"
            fi
        fi
        # shell-quote the argument (_qval) so eval keeps it as one word
        ep__cmd="${ep__cmd:+$ep__cmd }\"$(_qval "$ep__arg")\""
    done
    eval "$ep__cmd"
}
+
+function: base_setxp "équivalent à setx \$1 evalp \$2..@"
function base_setxp() {
    # Equivalent to: setx $1 evalp $2..@ — capture the output of an evalp
    # pipeline into the variable named by $1 (-a captures into an array).
    local -a sp__opts
    [ "$1" == -a ] && { sp__opts=(-a); shift; }
    local sp__name="$1"; shift
    case "$sp__name" in
        *=*)
            # 'var=word' form: put the first pipeline word back into the args
            set -- "${sp__name#*=}" "$@"
            sp__name="${sp__name%%=*}"
            ;;
    esac
    sp__opts+=("$sp__name")
    setx "${sp__opts[@]}" base_evalp "$@"
}
+
+function: base_cmdp "lancer une commande avec comme argument le résultat de evalp
+
+Par exemple, les deux commandes suivantes sont équivalentes:
+~~~
+base_cmdp CMD ARGS... // EVALARGS
+CMD ARGS... \"\$(evalp EVALARGS)\"
+~~~"
function base_cmdp() {
    # Run CMD ARGS... (everything before the first '//') with one extra
    # argument appended: the output of base_evalp on the remaining words,
    # i.e.: base_cmdp CMD ARGS // EVALARGS == CMD ARGS "$(evalp EVALARGS)".
    local -a cp__head
    while [ $# -gt 0 ]; do
        if [ "$1" == // ]; then shift; break; fi
        cp__head+=("$1"); shift
    done
    "${cp__head[@]}" "$(base_evalp "$@")"
}
+
+################################################################################
+# Générique
+
+function: base_evalx ""
function base_evalx() {
    # TODO: not implemented yet — placeholder for the generic evalx
    # dispatcher described in TODO.md; currently a no-op returning 0.
    :
}
+
+function: base_setxx "équivalent à setx \$1 evalx \$2..@"
function base_setxx() {
    # Equivalent to: setx $1 evalx $2..@ — capture the result of an evalx
    # expression into the variable named by $1 (-a captures into an array).
    local -a sx__opts
    [ "$1" == -a ] && { sx__opts=(-a); shift; }
    local sx__name="$1"; shift
    case "$sx__name" in
        *=*)
            # 'var=word' form: put the first evalx word back into the args
            set -- "${sx__name#*=}" "$@"
            sx__name="${sx__name%%=*}"
            ;;
    esac
    sx__opts+=("$sx__name")
    setx "${sx__opts[@]}" base_evalx "$@"
}
+
+function: base_cmdx "lancer une commande avec comme argument le résultat de evalx
+
+Par exemple, les deux commandes suivantes sont équivalentes:
+~~~
+base_cmdx CMD ARGS... // EVALARGS
+CMD ARGS... \"\$(evalx EVALARGS)\"
+~~~"
function base_cmdx() {
    # Run CMD ARGS... (everything before the first '//') with one extra
    # argument appended: the output of base_evalx on the remaining words,
    # i.e.: base_cmdx CMD ARGS // EVALARGS == CMD ARGS "$(evalx EVALARGS)".
    local -a cx__head
    while [ $# -gt 0 ]; do
        if [ "$1" == // ]; then shift; break; fi
        cx__head+=("$1"); shift
    done
    "${cx__head[@]}" "$(base_evalx "$@")"
}
+
+function: base_cmdsplitf "\
+Cette fonction doit être appelée avec N arguments (avec N>1). Elle analyse et
+découpe l'argument \$N comme avec une ligne de commande du shell. Ensuite, elle
+appelle la fonction \$1 avec les arguments de \$2 à \${N-1}, suivi des arguments
+obtenus lors de l'analyse de l'argument \$N. Par exemple, la commande suivante:
+~~~
+strsplitf cmd arg1 \"long arg2\" \"arg3 'long arg4'\"
+~~~
+est équivalente à:
+~~~
+cmd arg1 \"long arg2\" arg3 \"long arg4\"
+~~~
+
+Retourner le code 127 si la fonction à appeler n'est pas spécifiée. Retourner le
+code 126 si une erreur s'est produite lors de l'analyse de l'argument \$N"
function base_cmdsplitf() {
    # Word-split the LAST argument like a shell command line, then call
    # function $1 with arguments $2..${N-1} followed by the words parsed
    # from $N.  Return 127 when the function to call is missing, 126 when
    # parsing the last argument fails.
    [ $# -gt 0 ] || return 127
    local func count
    func="$1"; shift
    count=$#
    if [ $count -gt 0 ]; then
        # the last argument is deliberately left unquoted inside the eval so
        # that it is re-parsed with the shell's quoting rules
        eval 'set -- "${@:1:$(($count-1))}" '"${!count}" || return 126
    fi
    "$func" "$@"
}
+
+################################################################################
+# Tests
+
+function: testx "Faire un test unaire avec la commande [ sur une valeur calculée avec evalx.
+
+Utiliser la syntaxe 'testx op cmds...' e.g.
+~~~
+testx -z cmd1 // cmd2
+~~~"
function testx() {
    # Unary [ test on the output of evalx, e.g.: testx -z cmd1 // cmd2
    local t__op="$1"; shift
    local t__val="$(evalx "$@")"
    # t__op is deliberately unquoted: it is the test operator (-z, -n, ...)
    [ $t__op "$t__val" ]
}
+
+function: test2x "Faire une test binaire avec la commande [ entre une valeur spécifiée et une valeur calculée avec evalx.
+
+Utiliser la syntaxe 'test2x value op cmds...' e.g.
+~~~
+test2x value == cmd1 // cmd2
+~~~"
function test2x() {
    # Binary [ test between value $1 and the output of evalx on $3..@,
    # e.g.: test2x value == cmd1 // cmd2
    local t__val1="$1"; shift
    local t__op="$1"; shift
    local t__val2="$(evalx "$@")"
    # t__op is deliberately unquoted: it is the test operator (==, -lt, ...)
    [ "$t__val1" $t__op "$t__val2" ]
}
+
+function: testrx "Faire une test binaire avec la commande [[ entre une valeur spécifiée et une valeur calculée avec evalx.
+
+Utiliser la syntaxe 'testrx value op cmds...' e.g.
+~~~
+testrx value == cmd1 // cmd2
+~~~"
function testrx() {
    # Binary [[ test between value $1 and the output of evalx on $3..@.
    # The eval splices $t__op in as a [[ operator, enabling ==, =~, <, ...
    # with pattern/regex semantics on the right-hand side.
    local t__val1="$1"; shift
    local t__op="$1"; shift
    local t__val2="$(evalx "$@")"
    eval '[[ "$t__val1" '"$t__op"' "$t__val2" ]]'
}
+
+function: testp "Faire un test unaire avec la commande [ sur une valeur calculée avec evalp.
+
+Utiliser la syntaxe 'testp op cmds...' e.g.
+~~~
+testp -z cmd1 // cmd2
+~~~"
function testp() {
    # Unary [ test on the output of evalp, e.g.: testp -z cmd1 // cmd2
    local t__op="$1"; shift
    local t__val="$(evalp "$@")"
    # t__op is deliberately unquoted: it is the test operator (-z, -n, ...)
    [ $t__op "$t__val" ]
}
+
+function: test2p "Faire une test binaire avec la commande [ entre une valeur spécifiée et une valeur calculée avec evalp.
+
+Utiliser la syntaxe 'test2p value op cmds...' e.g.
+~~~
+test2p value == cmd1 // cmd2
+~~~"
function test2p() {
    # Binary [ test between value $1 and the output of evalp on $3..@,
    # e.g.: test2p value == cmd1 // cmd2
    local t__val1="$1"; shift
    local t__op="$1"; shift
    local t__val2="$(evalp "$@")"
    # t__op is deliberately unquoted: it is the test operator (==, -lt, ...)
    [ "$t__val1" $t__op "$t__val2" ]
}
+
+function: testrp "Faire une test binaire avec la commande [[ entre une valeur spécifiée et une valeur calculée avec evalp.
+
+Utiliser la syntaxe 'testrp value op cmds...' e.g.
+~~~
+testrp value == cmd1 // cmd2
+~~~"
function testrp() {
    # Binary [[ test between value $1 and the output of evalp on $3..@.
    # The eval splices $t__op in as a [[ operator (==, =~, <, ...).
    local t__val1="$1"; shift
    local t__op="$1"; shift
    local t__val2="$(evalp "$@")"
    eval '[[ "$t__val1" '"$t__op"' "$t__val2" ]]'
}
diff --git a/lib/nulib/bash/base.init b/lib/nulib/bash/base.init
new file mode 100644
index 0000000..0661a5c
--- /dev/null
+++ b/lib/nulib/bash/base.init
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.init base_ "Fonctions de base: initialiser l'environnement"
+
if [ -z "$NULIB_NO_INIT_ENV" ]; then
    # Location of the current script
    if [ "$0" == "-bash" ]; then
        # interactive shell: there is no script file to point at
        MYNAME=
        MYDIR=
        MYSELF=
    elif [ ! -f "$0" -a -f "${0#-}" ]; then
        # login shell: $0 is prefixed with '-'
        MYNAME="$(basename -- "${0#-}")"
        MYDIR="$(dirname -- "${0#-}")"
        MYDIR="$(cd "$MYDIR"; pwd)"
        MYSELF="$MYDIR/$MYNAME"
    else
        MYNAME="$(basename -- "$0")"
        MYDIR="$(dirname -- "$0")"
        MYDIR="$(cd "$MYDIR"; pwd)"
        MYSELF="$MYDIR/$MYNAME"
    fi
    [ -n "$NULIBDIR" ] || NULIBDIR="$MYDIR"

    # Temporary directory
    [ -z "$TMPDIR" -a -d "$HOME/tmp" ] && TMPDIR="$HOME/tmp"
    [ -z "$TMPDIR" ] && TMPDIR="${TMP:-${TEMP:-/tmp}}"
    export TMPDIR

    # User
    [ -z "$USER" -a -n "$LOGNAME" ] && export USER="$LOGNAME"

    # The nulibrc files must always be loaded
    [ -f /etc/debian_chroot ] && NULIB_CHROOT=1
    [ -f /etc/nulibrc ] && . /etc/nulibrc
    [ -f ~/.nulibrc ] && . ~/.nulibrc

    # Kind of system the script is running on
    UNAME_SYSTEM=`uname -s`
    [ "${UNAME_SYSTEM#CYGWIN}" != "$UNAME_SYSTEM" ] && UNAME_SYSTEM=Cygwin
    [ "${UNAME_SYSTEM#MINGW32}" != "$UNAME_SYSTEM" ] && UNAME_SYSTEM=Mingw
    UNAME_MACHINE=`uname -m`
    if [ -n "$NULIB_CHROOT" ]; then
        # inside a chroot the values may be forced from the environment
        [ -n "$NULIB_UNAME_SYSTEM" ] && eval "UNAME_SYSTEM=$NULIB_UNAME_SYSTEM"
        [ -n "$NULIB_UNAME_MACHINE" ] && eval "UNAME_MACHINE=$NULIB_UNAME_MACHINE"
    fi

    # Host name, respectively with and without the domain part
    # unlike $HOSTNAME this value may be overridden, e.g. by ruinst
    [ -n "$MYHOST" ] || MYHOST="$HOSTNAME"
    [ -n "$MYHOSTNAME" ] || MYHOSTNAME="${HOSTNAME%%.*}"
    export MYHOST MYHOSTNAME
fi
diff --git a/lib/nulib/bash/base.io b/lib/nulib/bash/base.io
new file mode 100644
index 0000000..e274f27
--- /dev/null
+++ b/lib/nulib/bash/base.io
@@ -0,0 +1,1338 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.io base_ "Fonctions de base: affichage et saisie"
+require: base.arr
+
# Canonical encoding names and the current input/output encodings.
NULIB__TAB=$'\t'
NULIB__LATIN1=iso-8859-1
NULIB__LATIN9=iso-8859-15
NULIB__UTF8=utf-8
NULIB_IENC="$NULIB__UTF8"
NULIB_OENC="$NULIB__UTF8"

# Fall back to a pass-through iconv when the real tool is unavailable.
# 'command -v' is the standard, fork-free replacement for 'which'.
if ! command -v iconv >/dev/null 2>&1; then
    function iconv() { cat; }
fi
+
function nulib__lang_encoding() {
    # Derive the default encoding from $LANG (case-insensitively):
    # *@euro -> latin9, *.utf-8/*.utf8 -> utf-8, anything else -> latin1.
    local lang="${LANG,,}"
    case "$lang" in
    *@euro)
        echo iso-8859-15;;
    *.utf-8|*.utf8)
        echo utf-8;;
    *)
        echo iso-8859-1;;
    esac
}
+
function nulib__norm_encoding() {
    # Normalize an encoding name to its canonical iconv spelling.  Case is
    # ignored, as are '-' and '_'; unrecognized names pass through as-is.
    local nrm="${1,,}"
    nrm="${nrm//[-_]/}"
    case "$nrm" in
    utf|utf8)
        echo "utf-8";;
    latin9|iso885915|885915)
        echo "iso-8859-15";;
    latin|latin1|iso8859|iso88591|8859|88591)
        echo "iso-8859-1";;
    *)
        echo "$1";;
    esac
}
+
function nulib__init_encoding() {
    # Compute the effective I/O encodings: each NULIB_*_ENCODING defaults
    # to the previous one in the chain (output <- LANG, input <- output,
    # editor <- input) and every value is normalized to its canonical name.
    local DEFAULT_ENCODING="$(nulib__lang_encoding)"
    [ -n "$DEFAULT_ENCODING" ] || DEFAULT_ENCODING=utf-8
    [ -n "$NULIB_OUTPUT_ENCODING" ] || NULIB_OUTPUT_ENCODING="$DEFAULT_ENCODING"
    NULIB_OUTPUT_ENCODING="$(nulib__norm_encoding "$NULIB_OUTPUT_ENCODING")"
    [ -n "$NULIB_INPUT_ENCODING" ] || NULIB_INPUT_ENCODING="$NULIB_OUTPUT_ENCODING"
    NULIB_INPUT_ENCODING="$(nulib__norm_encoding "$NULIB_INPUT_ENCODING")"
    [ -n "$NULIB_EDITOR_ENCODING" ] || NULIB_EDITOR_ENCODING="$NULIB_INPUT_ENCODING"
    NULIB_EDITOR_ENCODING="$(nulib__norm_encoding "$NULIB_EDITOR_ENCODING")"

    NULIB_IENC="$NULIB_INPUT_ENCODING"
    NULIB_OENC="$NULIB_OUTPUT_ENCODING"
}
# honour NULIB_LANG when LANG is unset, then compute the encodings once
[ -n "$NULIB_LANG" -a -z "$LANG" ] && export NULIB_LANG LANG="$NULIB_LANG"
nulib__init_encoding
+
function nulib_local() {
    # Print 'local' declarations making selected state variables local in
    # the caller; use as: eval "$(nulib_local ...)".
    # - opts/o/args/parse_opts -> 'local -a args' (for parse_opts())
    # - verbosity/v            -> local NULIB_VERBOSITY (current value)
    # - interaction/i          -> local NULIB_INTERACTION (current value)
    # Without arguments, all three declarations are printed.  This protects
    # the environment when calling functions that use parse_opts()/PRETTYOPTS.
    local nl__what
    if [ $# -eq 0 ]; then
        set -- opts verbosity interaction
    fi
    for nl__what in "$@"; do
        case "$nl__what" in
        parse_opts|opts|o|args)
            echo "local -a args";;
        verbosity|v)
            echo "local NULIB_VERBOSITY='$NULIB_VERBOSITY'";;
        interaction|i)
            echo "local NULIB_INTERACTION='$NULIB_INTERACTION'";;
        esac
    done
}
+
function noerror() {
    # Run command "$@" (':' when no command is given) and mask its exit
    # code: this function always returns 0.
    [ $# -gt 0 ] || set -- :
    "$@" || true
}
+
function noout() {
    # Run command "$@" with stdout discarded; the command's exit status is
    # preserved (0 when no command is given).
    [ $# -eq 0 ] && return 0
    "$@" 1>/dev/null
}
+
function noerr() {
    # Run command "$@" with stderr discarded; the command's exit status is
    # preserved (0 when no command is given).
    [ $# -eq 0 ] && return 0
    "$@" 2>/dev/null
}
+
function stdredir() {
    # Run command $4..@ with stdin taken from $1, stdout sent to $2 and
    # stderr sent to $3.  An empty value — or the pseudo-path /dev/stdin,
    # /dev/stdout, /dev/stderr respectively — leaves that stream untouched.
    # $1 may be prefixed with '<'; $2 and $3 may be prefixed with '>'
    # (truncate, the default) or '>>' (append).
    # This helper exists because some bash builds do not special-case the
    # /dev/std* paths and some containers (e.g. OpenVZ) do not provide them.
    # BUG FIX: the original conditions used '-o "$1" == /dev/std*', which
    # redirected even for the pseudo-paths the contract says to skip; they
    # now require a non-empty value that is NOT the matching pseudo-path.
    local __redirs __in __out __err
    if [ -n "$1" ] && [ "$1" != /dev/stdin ]; then
        __in="${1#<}"   # strip an optional leading '<'
        __redirs="$__redirs"' <"$__in"'
    fi; shift
    if [ -n "$1" ] && [ "$1" != /dev/stdout ]; then
        if [ "${1#>>}" != "$1" ]; then
            __out="${1#>>}"
            __redirs="$__redirs"' >>"$__out"'
        else
            __out="${1#>}"   # strip an optional leading '>'
            __redirs="$__redirs"' >"$__out"'
        fi
    fi; shift
    if [ -n "$1" ] && [ "$1" != /dev/stderr ]; then
        if [ "${1#>>}" != "$1" ]; then
            __err="${1#>>}"
            __redirs="$__redirs"' 2>>"$__err"'
        else
            __err="${1#>}"   # strip an optional leading '>'
            __redirs="$__redirs"' 2>"$__err"'
        fi
    fi; shift
    eval '"$@"'"$__redirs"
}
+
function isatty() {
    # True when STDOUT is a terminal (i.e. not redirected).
    # Uses the test builtin instead of forking the external 'tty' command;
    # this also works on systems where 'tty' is not installed.
    [ -t 1 ]
}

function in_isatty() {
    # True when STDIN is a terminal (i.e. not redirected).
    [ -t 0 ]
}

function out_isatty() {
    # True when STDOUT is a terminal; identical to isatty().
    [ -t 1 ]
}

function err_isatty() {
    # True when STDERR is a terminal (i.e. not redirected).
    [ -t 2 ]
}
+
+################################################################################
+# affichage
+
function tooenc() {
    # Print value $1 converted from encoding $2 (default $NULIB_OENC) to
    # the output encoding $3 (default $NULIB_OUTPUT_ENCODING).
    local src="$1" from="${2:-$NULIB_OENC}" to="${3:-$NULIB_OUTPUT_ENCODING}"
    if [ "$from" == "$to" ]; then
        recho "$src"
    else
        iconv -f "$from" -t "$to" <<<"$src"
    fi
}

function uecho() {
    # echo "$*" transcoded to the output encoding
    tooenc "$*"
}

function tooenc_() {
    # Same as tooenc() but without a trailing newline (recho_ variant).
    local src="$1" from="${2:-$NULIB_OENC}" to="${3:-$NULIB_OUTPUT_ENCODING}"
    if [ "$from" == "$to" ]; then
        recho_ "$src"
    else
        recho_ "$src" | iconv -f "$from" -t "$to"
    fi
}

function uecho_() {
    # uecho() without the trailing newline
    tooenc_ "$*"
}

function stooenc() { ### XXX
    # Convert stdin from $NULIB_OENC (override: $1) to the default output
    # encoding $NULIB_OUTPUT_ENCODING (override: $2).
    local from="${1:-$NULIB_OENC}" to="${2:-$NULIB_OUTPUT_ENCODING}"
    if [ "$from" == "$to" ]; then
        cat
    else
        iconv -f "$from" -t "$to"
    fi
}
+
# Should etitle, estep, ebegin messages be prefixed with a timestamp?
# Set NULIB_EDATE=1 at the beginning of a script to enable this feature.
export NULIB_EDATE
function __edate() { [ -n "$NULIB_EDATE" ] && date +"[%d/%m/%Y-%H:%M:%S] "; }
+
export NULIB_ELOG_OVERWRITE
# stub meant to be overridden (by the display module) to disable colors
function __set_no_colors() { :; }
function elogto() {
    # Enable NULIB_EDATE and redirect STDOUT and STDERR to file $1.
    # With two files, STDOUT goes to $1 and STDERR to $2; with no argument,
    # no redirection happens.  When redirecting, force UTF-8 output.  When
    # NULIB_ELOG_OVERWRITE=1 the destination files are truncated, otherwise
    # output is appended.
    NULIB_EDATE=1
    if [ -n "$1" -a -n "$2" ]; then
        LANG=fr_FR.UTF8
        NULIB_OUTPUT_ENCODING="$NULIB__UTF8"
        __set_no_colors 1
        if [ -n "$NULIB_ELOG_OVERWRITE" ]; then
            exec >"$1" 2>"$2"
        else
            exec >>"$1" 2>>"$2"
        fi
    elif [ -n "$1" ]; then
        LANG=fr_FR.UTF8
        NULIB_OUTPUT_ENCODING="$NULIB__UTF8"
        __set_no_colors 1
        if [ -n "$NULIB_ELOG_OVERWRITE" ]; then
            exec >"$1" 2>&1
        else
            exec >>"$1" 2>&1
        fi
    fi
}
+
# Variables used for the indented display of messages and titles:
# __estack is the stack of currently-open 'ebegin'/'etitle' invocations
# __tlevel is the indent prefix applied before a message is displayed
export __estack __tlevel
function __indent() {
    # Indent every line of $1 with ${__tlevel}, except the first one.
    # The substitution pattern below contains a literal newline: the test
    # asks "does $1 span several lines?"
    if [ "${1/
/}" != "$1" ]; then
        sed "2,\$s/^/${__tlevel}/g" <<<"$1"
    else
        recho "$1"
    fi
}
# functions to override in order to customize how messages are displayed
function __eerror() { tooenc "$(__edate)${__tlevel}ERROR $(__indent "$1")"; }
function __ewarn() { tooenc "$(__edate)${__tlevel}WARNING $(__indent "$1")"; }
function __enote() { tooenc "$(__edate)${__tlevel}NOTE $(__indent "$1")"; }
function __ebanner() {
    # Frame message $1 in a banner of '=' signs, COLUMNS (default 80) wide.
    local maxi="${COLUMNS:-80}"
    local -a lines
    local psfix line

    # top/bottom ruler, prefixed with the date and the current indent
    psfix="$(__edate)${__tlevel}"
    while [ ${#psfix} -lt $maxi ]; do psfix="$psfix="; done

    tooenc "$psfix"
    maxi=$(($maxi - 1))
    base_array_xsplitl lines "$1"
    for line in "" "${lines[@]}" ""; do
        line="$(__edate)${__tlevel}= $line"
        if [ ${#line} -le $maxi ]; then
            # pad so every row ends with a closing '='
            while [ ${#line} -lt $maxi ]; do line="$line "; done
            line="$line="
        fi
        tooenc "$line"
    done
    tooenc "$psfix"
}
function __eimportant() { tooenc "$(__edate)${__tlevel}IMPORTANT $(__indent "$1")"; }
function __eattention() { tooenc "$(__edate)${__tlevel}ATTENTION $(__indent "$1")"; }
function __einfo() { tooenc "$(__edate)${__tlevel}INFO $(__indent "$1")"; }
function __eecho() { tooenc "$(__edate)${__tlevel}$(__indent "$1")"; }
function __eecho_() { tooenc_ "$(__edate)${__tlevel}$(__indent "$1")"; }
function __edebug() { tooenc "$(__edate)${__tlevel}DEBUG $(__indent "$1")"; }
function __estep() { tooenc "$(__edate)${__tlevel}. $(__indent "$1")"; }
function __estepe() { tooenc "$(__edate)${__tlevel}.E $(__indent "$1")"; }
function __estepw() { tooenc "$(__edate)${__tlevel}.W $(__indent "$1")"; }
function __estepn() { tooenc "$(__edate)${__tlevel}.N $(__indent "$1")"; }
function __estepi() { tooenc "$(__edate)${__tlevel}.I $(__indent "$1")"; }
function __estep_() { tooenc_ "$(__edate)${__tlevel}. $(__indent "$1")"; }
function __estepe_() { tooenc_ "$(__edate)${__tlevel}.E $(__indent "$1")"; }
function __estepw_() { tooenc_ "$(__edate)${__tlevel}.W $(__indent "$1")"; }
function __estepn_() { tooenc_ "$(__edate)${__tlevel}.N $(__indent "$1")"; }
function __estepi_() { tooenc_ "$(__edate)${__tlevel}.I $(__indent "$1")"; }
function __etitle() { tooenc "$(__edate)${__tlevel}=== $(__indent "$1")"; }
function __ebegin() { tooenc_ "$(__edate)${__tlevel}. $(__indent "$1"): "; }
# dot-progress primitives used inside an ebegin section
function __edoto() { echo_ "."; }
function __edotw() { echo_ "w"; }
function __edotx() { echo_ "x"; }
function __edotp() { echo_ "+"; }
function __edotd() { tooenc "($1)"; }
function __eendo() { echo "[ok]"; }
function __eendx() { echo "[error]"; }
PRETTYOPTS=()
# minimal verbosity/interaction stubs, meant to be overridden by the full
# option-handling module
function set_verbosity() { :;}
function set_interaction() { :;}
function show_error() {
    # Tell whether error (resp. warning, info, debug) messages must be
    # displayed; these minimal versions always say yes, except show_debug
    # which honours $DEBUG.
    return 0
}
function show_warn() {
    return 0
}
function show_info() {
    return 0
}
function show_verbose() {
    return 0
}
function show_debug() {
    [ -n "$DEBUG" ]
}
function check_verbosity() {
    return 0
}
function get_verbosity_option() { :;}
function check_interaction() {
    return 0
}
+
+# note: toutes les fonctions d'affichage e* écrivent sur stderr
+__epending=
+function eflush() {
+# Afficher les messages en attente
+ if [ -n "$__epending" ]; then recho "$__epending" 1>&2; __epending=; fi
+}
+function eclearp() {
+# Supprimer les message en attente
+ __epending=
+}
+function eerror() {
+# Afficher un message d'erreur
+ show_error || return; eflush; __eerror "$*" 1>&2
+}
+
function die() {
    # Display error message $@ (if any) and abort the script with status 1.
    # NOTE(review): calls base_eerror, presumably the published name of
    # eerror() above — confirm against the module loader's aliasing rules.
    [ $# -gt 0 ] && base_eerror "$@"
    exit 1
}
+
function exit_with {
    # Run command "$@" when given, then exit with the status of the last
    # command (0 when called without arguments).
    local xw__rc=0
    if [ $# -gt 0 ]; then
        "$@"
        xw__rc=$?
    fi
    exit $xw__rc
}
+
function die_with {
    # Display error message $1, run command $2..@ if given, then exit 1.
    [ $# -gt 0 ] && base_eerror "$1"
    shift
    [ $# -gt 0 ] && "$@"
    exit 1
}

function die_unless() {
    # Display $1 and exit (with the command's status) if command $2..@
    # returns FALSE; without a command, display $1 and exit with 1.
    # NOTE(review): this family calls base__eerror (double underscore)
    # while die()/die_with() call base_eerror — one of the two spellings is
    # probably a typo; confirm against the module loader's naming scheme.
    local du__r
    local du__msg="$1"; shift
    if [ $# -eq 0 ]; then
        [ -n "$du__msg" ] && base__eerror "$du__msg"
        exit 1
    elif "$@"; then
        :
    else
        du__r=$?
        [ -n "$du__msg" ] && base__eerror "$du__msg"
        exit $du__r
    fi
    return 0
}

function eerror_unless() {
    # Display $1 if command $2..@ returns FALSE; in every case return the
    # command's status (1 when no command is given).
    local eu__r
    local eu__msg="$1"; shift
    if [ $# -eq 0 ]; then
        [ -n "$eu__msg" ] && base__eerror "$eu__msg"
        return 1
    elif "$@"; then
        :
    else
        eu__r=$?
        [ -n "$eu__msg" ] && base__eerror "$eu__msg"
        return $eu__r
    fi
    return 0
}

function die_if() {
    # Display $1 and exit if command $2..@ returns TRUE; otherwise return
    # the command's status.
    # NOTE(review): on a true condition this exits with status 0 although
    # the surrounding docs announce die() semantics (exit 1); it reads as
    # intentional ("stop, nothing left to do") — confirm with callers.
    local di__r=0
    local di__msg="$1"; shift
    [ $# -eq 0 ] && return 0
    if "$@"; then
        [ -n "$di__msg" ] && base__eerror "$di__msg"
        exit 0
    else
        di__r=$?
    fi
    return $di__r
}

function eerror_if() {
    # Display $1 if command $2..@ returns TRUE; in every case return the
    # command's status.
    local ei__r=0
    local ei__msg="$1"; shift
    [ $# -eq 0 ] && return 0
    if "$@"; then
        [ -n "$ei__msg" ] && base__eerror "$ei__msg"
    else
        ei__r=$?
    fi
    return $ei__r
}
+
function ewarn() {
    # Display a warning message
    show_warn || return; eflush; __ewarn "$*" 1>&2
}
function enote() {
    # Display an informational message of the same level as a warning
    show_info || return; eflush; __enote "$*" 1>&2
}
function ebanner() {
    # Display a very important framed message, then wait 5 seconds
    show_error || return; eflush; __ebanner "$*" 1>&2; sleep 5
}
function eimportant() {
    # Display a very important message
    show_error || return; eflush; __eimportant "$*" 1>&2
}
function eattention() {
    # Display an important message
    show_warn || return; eflush; __eattention "$*" 1>&2
}
function einfo() {
    # Display an informational message
    show_info || return; eflush; __einfo "$*" 1>&2
}
function eecho() {
    # Display an informational message without any prefix
    show_info || return; eflush; __eecho "$*" 1>&2
}
function eecho_() {
    # eecho() without the trailing newline
    show_info || return; eflush; __eecho_ "$*" 1>&2
}
function edebug() {
    # Display a debug message
    show_debug || return; eflush; __edebug "$*" 1>&2
}
function trace() {
    # Display command $1..@, run it, then display its exit code if it
    # fails.
    local r cmd="$(qvals "$@")"
    show_info && { eflush; __eecho "\$ $cmd" 1>&2; }
    "$@"; r=$?
    if [ $r -ne 0 ]; then
        if show_info; then
            eflush; __eecho "^ [EC #$r]" 1>&2
        elif show_error; then
            eflush; __eecho "^ $cmd [EC #$r]" 1>&2;
        fi
    fi
    return $r
}
function trace_error() {
    # Run command $1..@ and display its exit code if it fails.  Unlike
    # trace(), the command line itself is only shown when an error occurs.
    local r
    "$@"; r=$?
    if [ $r -ne 0 ]; then
        local cmd="$(qvals "$@")"
        show_error && { eflush; __eecho "^ $cmd [EC #$r]" 1>&2; }
    fi
    return $r
}
+
function etitle() {
    # Display title $1, the start of a (possibly nested, indented) section.
    # The section stays open and must be closed explicitly with eend(),
    # except when a command $2..@ is given: the command then runs inside
    # the section, which is closed automatically unless option -s keeps it
    # open; option -p additionally calls eclearp() to purge pending
    # deferred messages.  Without a command, option -s is ignored.
    # etitled() is identical but defers the title display until the next
    # e* output, so that eclearp() can cancel the title of an empty section.
    local __t_deferred=
    __t_etitle "$@"
}
function etitled() {
    # Deferred variant of etitle(), see above.
    local __t_deferred=1
    __t_etitle "$@"
}
function __t_etitle() {
    # Worker shared by etitle()/etitled(): parse -s/--eend/-p, open the
    # section, optionally run the command and close the section.
    local __t_eend=default
    local __t_clearp=
    while [ -n "$1" ]; do
        if [ "$1" == "--" ]; then
            shift
            break
        elif [ "$1" == "-s" ]; then
            __t_eend=
            shift
        elif [ "$1" == "--eend" ]; then
            __t_eend=1
            shift
        elif [ "$1" == "-p" ]; then
            __t_clearp=1
            shift
        else
            break
        fi
    done
    local __t_title="$1"; shift
    local __t_s=0
    # etitle: push onto the section stack and grow the indent
    [ -n "$__estack" ] && __tlevel="${__tlevel}  "
    __estack="$__estack:t"
    if show_info; then
        if [ -n "$__t_deferred" ]; then
            # queue the title; eflush() will print it later if needed
            __epending="${__epending:+$__epending
}$(__etitle "$__t_title")"
        else
            eflush
            __etitle "$__t_title" 1>&2
        fi
    fi
    # command
    if [ $# -gt 0 ]; then
        "$@"
        __t_s=$?
        [ "$__t_eend" == "default" ] && __t_eend=1
    fi
    # eend
    [ "$__t_eend" == "default" ] && __t_eend=
    if [ -n "$__t_eend" ]; then
        eend $__t_s
        [ -n "$__t_clearp" ] && eclearp
    fi
    return $__t_s
}
function estep() {
    # Display the description of one operation step; best used inside an
    # etitle section.  The e (error), w (warning), n (note) and i (info)
    # variants only change the display style: all of them are info level.
    show_info || return; eflush; __estep "$*" 1>&2
}
function estepe() {
    show_info || return; eflush; __estepe "$*" 1>&2
}
function estepw() {
    show_info || return; eflush; __estepw "$*" 1>&2
}
function estepn() {
    show_info || return; eflush; __estepn "$*" 1>&2
}
function estepi() {
    show_info || return; eflush; __estepi "$*" 1>&2
}
function estep_() {
    show_info || return; eflush; __estep_ "$*" 1>&2
}
function estepe_() {
    show_info || return; eflush; __estepe_ "$*" 1>&2
}
function estepw_() {
    show_info || return; eflush; __estepw_ "$*" 1>&2
}
function estepn_() {
    show_info || return; eflush; __estepn_ "$*" 1>&2
}
function estepi_() {
    show_info || return; eflush; __estepi_ "$*" 1>&2
}
function ebegin() {
    # Display message $1 describing the start of an operation, opening a
    # section that must be closed with eend().  When a command $2..@ is
    # given it runs inside the section, which is then closed automatically
    # unless option -s keeps it open.
    local __b_eend=default
    while [ -n "$1" ]; do
        if [ "$1" == "--" ]; then
            shift
            break
        elif [ "$1" == "-s" ]; then
            __b_eend=
            shift
        elif [ "$1" == "--eend" ]; then
            __b_eend=1
            shift
        else
            break
        fi
    done
    local __b_msg="$1"; shift
    local __b_s=0
    # ebegin: push onto the section stack
    __estack="$__estack:b"
    if show_info; then
        eflush
        __ebegin "$__b_msg" 1>&2
    fi
    # command
    if [ $# -gt 0 ]; then
        "$@"
        __b_s=$?
        [ "$__b_eend" == "default" ] && __b_eend=1
    fi
    # eend
    [ "$__b_eend" == "default" ] && __b_eend=
    [ -n "$__b_eend" ] && eend $__b_s
    return $__b_s
}
function edot() {
    # Display one step of an operation as '.' (success) or 'x' (error),
    # based on $1 or, by default, on the status of the last command; in
    # verbose mode $2..@ is shown as a detail line.  Best inside ebegin.
    local s=$?
    show_info || return
    eflush
    [ -n "$1" ] && s="$1"
    shift
    if [ "$s" == "0" ]; then
        __edoto 1>&2
    else
        __edotx 1>&2
    fi
    show_verbose && [ $# -gt 0 ] && __edotd "$*" 1>&2
    return $s
}
function edotw() {
    # Display a warning step as 'w' (typically yellow); best inside ebegin.
    local s=$?
    show_info || return
    eflush
    [ -n "$1" ] && s="$1"
    shift
    __edotw 1>&2
    show_verbose && [ $# -gt 0 ] && __edotd "$*" 1>&2
    return $s
}
function ewait() {
    # Show the progress of a long operation: display one '+' per second
    # while process $1 is still running.  Typical use:
    #   ebegin "msg"
    #   cmd &
    #   ewait $!
    #   eend
    [ -n "$1" ] || return 1
    if show_info; then
        local count=2
        eflush
        little_sleep # some processes return immediately
        while is_running "$1"; do
            sleep 1
            if [ $count -gt 0 ]; then
                # wait 2 seconds before starting to display '+'
                count=$(($count - 1))
            else
                __edotp 1>&2
            fi
        done
        # finish with a '.'
        __edoto 1>&2
    else
        # display nothing, but still wait for the operation to finish
        wait "$1"
    fi
}
function eend() {
    # Close the current section.  With option -c, reset all section state.
    # For an ebegin section, display [ok] or [error] depending on the
    # status of the last command (or on $1 when given); for an etitle
    # section opened with -s, pop one indent level.
    local s=$?
    if [ "$1" == "-c" ]; then
        __estack=
        __tlevel=
    elif [ "${__estack%:b}" != "$__estack" ]; then
        # close ebegin
        __estack="${__estack%:b}"
        show_info || return
        eflush
        [ -n "$1" ] && s="$1"
        if [ "$s" == "0" ]; then
            __eendo 1>&2
        else
            __eendx 1>&2
        fi
    elif [ "${__estack%:t}" != "$__estack" ]; then
        # close etitle -s
        __estack="${__estack%:t}"
        __tlevel="${__tlevel%  }"
    fi
}
function __elinedots() {
    # Worker for elinedots(): inside an ebegin/eend section, display one
    # dot per line read on stdin; in debug mode also display the line.
    ebegin "$1"
    local line
    # IFS= and -r keep leading/trailing blanks and literal backslashes in
    # the lines (the original bare 'read line' mangled backslashes).
    if show_debug; then
        while IFS= read -r line; do
            __edoto 1>&2
            __edotd "$line" 1>&2
        done
    else
        while IFS= read -r line; do
            __edoto 1>&2
        done
    fi
    eend
}
function elinedots() {
    # Display message $1 like ebegin, then one dot '.' per line read on
    # stdin, to follow the progress of an operation; in DEBUG mode the line
    # itself is displayed instead of a dot.  With a command $2..@,
    # 'elinedots msg cmd args' is shorthand for 'cmd args | elinedots msg'.
    local msg="$1"; shift
    if [ $# -gt 0 ]; then
        "$@" | __elinedots "$msg"
    else
        __elinedots "$msg"
    fi
}
+
+################################################################################
+# saisie
+
function toienc() {
    # Convert the value of the variable named $1, in place, from encoding
    # $3 (default $NULIB_INPUT_ENCODING) to encoding $2 (default
    # $NULIB_IENC).
    local __tie_var="$1" __tie_to="${2:-$NULIB_IENC}" __tie_from="${3:-$NULIB_INPUT_ENCODING}"
    if [ "$__tie_from" != "$__tie_to" ]; then
        _setv "$__tie_var" "$(iconv -f "$__tie_from" -t "$__tie_to" <<<"${!__tie_var}")"
    fi
}

function uread() {
    # read wrapper: read a value from stdin into the given variables (REPLY
    # by default), assuming the input uses the default input encoding, then
    # transcode each variable in place with toienc().
    [ $# -gt 0 ] || set -- REPLY
    local __r_var
    read "$@"
    for __r_var in "$@"; do
        [ -z "$__r_var" -o "${__r_var:0:1}" == "-" ] && continue # skip read options
        toienc "$__r_var"
    done
}

function stoienc() { ### XXX
    # Convert stdin from the default input encoding $NULIB_INPUT_ENCODING
    # (override: $2) to $NULIB_IENC (override: $1).
    # NOTE(review): the original French header stated the opposite
    # direction; the code (to=$1, from=$2, mirroring toienc) is taken as
    # authoritative here.
    local to="${1:-$NULIB_IENC}" from="${2:-$NULIB_INPUT_ENCODING}"
    if [ "$from" == "$to" ]; then
        cat
    else
        iconv -f "$from" -t "$to"
    fi
}
+
+
+
function is_interaction() {
    # stub: tell whether the script runs interactively; this minimal
    # version always says no (overridden by the option-handling module)
    return 1
}

function get_interaction_option() { :;}

function ask_yesno() {
    # Display message $1 followed by [oN] or [On] depending on the default
    # answer $2 (O or N, French oui/non), read the answer and return 0 for
    # yes, 1 for no.
    # When $1 is an option it is passed to check_interaction to decide
    # whether we are in interactive mode, and the remaining arguments shift
    # by one ($2=message, $3=default).
    # Default C means: N when interactive, O otherwise.
    # Default X means: O when interactive, N otherwise.
    local interactive=1
    if [[ "$1" == -* ]]; then
        if [ "$1" != -- ]; then
            check_interaction "$1" || interactive=
        fi
        shift
    else
        check_interaction -c || interactive=
    fi
    local default="${2:-N}"
    if [ "$default" == "C" ]; then
        [ -n "$interactive" ] && default=N || default=O
    elif [ "$default" == "X" ]; then
        [ -n "$interactive" ] && default=O || default=N
    fi
    if [ -n "$interactive" ]; then
        eflush
        local message="$1"
        local prompt="[oN]"
        local r
        is_yes "$default" && prompt="[On]"
        if [ -n "$message" ]; then
            __eecho_ "$message" 1>&2
        else
            NULIB_OENC="$NULIB__UTF8" __eecho_ "Voulez-vous continuer?" 1>&2
        fi
        NULIB_OENC="$NULIB__UTF8" tooenc_ " $prompt " 1>&2
        uread r
        is_yes "${r:-$default}"
    else
        # non-interactive: silently take the default answer
        is_yes "$default"
    fi
}
+
function ask_any() {
    # Display message $1 followed by "[$2]" (format string, default +Oq),
    # read an answer and return the 0-based index of the selected letter of
    # the format.  Generalization of ask_yesno() to any set of letters.
    # The first uppercase letter of the format is the default choice.
    # Letter O also matches every "yes" letter: o, y, 1, v, t.
    # Letter N also matches every "no" letter: n, f, 0.
    # Shortcuts expanded inside the format:
    #   +O -> On
    #   +N -> oN
    #   +C -> oN when interactive, On otherwise
    #   +X -> On when interactive, oN otherwise
    # When $1 is an option it is passed to check_interaction to decide
    # whether we are in interactive mode, and the remaining arguments shift
    # by one ($2=message, $3=format).
    local interactive=1
    if [[ "$1" == -* ]]; then
        if [ "$1" != -- ]; then
            check_interaction "$1" || interactive=
        fi
        shift
    else
        check_interaction -c || interactive=
    fi
    local format="${2:-+Oq}"
    format="${format/+O/On}"
    format="${format/+N/oN}"
    if [ -n "$interactive" ]; then
        format="${format/+C/oN}"
        format="${format/+X/On}"
    else
        format="${format/+C/On}"
        format="${format/+X/oN}"
    fi
    local i count="${#format}"

    if [ -n "$interactive" ]; then
        eflush
        local message="${1:-Voulez-vous continuer?}"
        local prompt="[$format]"
        local r f lf defi
        while true; do
            __eecho_ "$message $prompt " 1>&2
            uread r
            r="$(strlower "${r:0:1}")"
            i=0; defi=
            while [ $i -lt $count ]; do
                f="${format:$i:1}"
                lf="$(strlower "$f")"
                [ "$r" == "$lf" ] && return $i
                if [ -z "$defi" ]; then
                    # remember the first uppercase letter: the default
                    [ -z "${f/[A-Z]/}" ] && defi="$i"
                fi
                if [ "$lf" == o ]; then
                    case "$r" in o|y|1|v|t) return $i;; esac
                elif [ "$lf" == n ]; then
                    case "$r" in n|f|0) return $i;; esac
                fi
                i=$(($i + 1))
            done
            # empty answer: take the default (first uppercase) letter
            [ -z "$r" ] && return ${defi:-0}
        done
    else
        # non-interactive: return the index of the first uppercase letter
        i=0
        while [ $i -lt $count ]; do
            f="${format:$i:1}"
            [ -z "${f/[A-Z]/}" ] && return $i
            i=$(($i + 1))
        done
        return 0
    fi
}
+
+function read_value() {
+# Display message $1 followed by the default value [$3] if it is non-empty,
+# then read the value entered by the user. The value must be non-empty if
+# $4(=O) is true. The value read is stored in the variable $2(=value)
+# If $1 is an option, it is passed to check_interaction to find out whether we
+# are in interactive mode. In that case the remaining values shift
+# ($2=message, $3=variable, $4=default, $5=required)
+# In non-interactive mode the default value is selected. If the caller
+# requires a non-empty value and the default value is empty, an error message
+# is displayed and false is returned
+# read_password() is like read_value(), but the value typed is not echoed,
+# which makes it suitable for reading a password.
+    local -a __rv_opts __rv_readline=1 __rv_showdef=1 __rv_nl=
+    __rv_opts=()
+    # readline support can be disabled globally with NULIB_NO_READLINE
+    [ -n "$NULIB_NO_READLINE" ] && __rv_readline=
+    __rv_read "$@"
+}
+
+function read_password() {
+    # like read_value but: no readline, hide the default value, read with -s
+    # and emit the newline that -s suppresses
+    local -a __rv_opts __rv_readline= __rv_showdef= __rv_nl=1
+    __rv_opts=(-s)
+    __rv_read "$@"
+}
+
+function __rv_read() {
+    # shared worker for read_value()/read_password(); honours the __rv_*
+    # settings prepared by its callers
+    local __rv_int=1
+    if [[ "$1" == -* ]]; then
+        if [ "$1" != -- ]; then
+            check_interaction "$1" || __rv_int=
+        fi
+        shift
+    else
+        check_interaction -c || __rv_int=
+    fi
+    local __rv_msg="$1" __rv_v="${2:-value}" __rv_d="$3" __rv_re="${4:-O}"
+    if [ -z "$__rv_int" ]; then
+        # In non-interactive mode, return the default value
+        if is_yes "$__rv_re" && [ -z "$__rv_d" ]; then
+            NULIB_OENC="$NULIB__UTF8" eerror "La valeur par défaut de $__rv_v doit être non vide"
+            return 1
+        fi
+        _setv "$__rv_v" "$__rv_d"
+        return 0
+    fi
+
+    eflush
+    local __rv_r
+    while true; do
+        if [ -n "$__rv_msg" ]; then
+            __eecho_ "$__rv_msg" 1>&2
+        else
+            NULIB_OENC="$NULIB__UTF8" __eecho_ "Entrez la valeur" 1>&2
+        fi
+        if [ -n "$__rv_readline" ]; then
+            NULIB_OENC="$NULIB__UTF8" tooenc_ ": " 1>&2
+            # readline mode: pre-fill the input line with the default value
+            uread -e ${__rv_d:+-i"$__rv_d"} "${__rv_opts[@]}" __rv_r
+        else
+            if [ -n "$__rv_d" ]; then
+                if [ -n "$__rv_showdef" ]; then
+                    tooenc_ " [$__rv_d]" 1>&2
+                else
+                    # password mode: never display the actual default value
+                    tooenc_ " [****]" 1>&2
+                fi
+            fi
+            NULIB_OENC="$NULIB__UTF8" tooenc_ ": " 1>&2
+            uread "${__rv_opts[@]}" __rv_r
+            # with read -s the user's newline is not echoed; emit one
+            [ -n "$__rv_nl" ] && echo
+        fi
+        __rv_r="${__rv_r:-$__rv_d}"
+        # loop until a value is provided, unless empty values are allowed
+        if [ -n "$__rv_r" ] || ! is_yes "$__rv_re"; then
+            _setv "$__rv_v" "$__rv_r"
+            return 0
+        fi
+    done
+}
+
+function simple_menu() {
+# Display a simple menu whose items are the values of the array $2(=options).
+# The chosen option is stored in the variable $1(=option)
+# -t TITLE: set the menu title
+# -m YOUR_CHOICE: set the prompt used for selecting the option
+# -d DEFAULT: set the default option. By default, take the current value of
+#    the variable $1(=option)
+    local __sm_title= __sm_yourchoice= __sm_default=
+    local -a __sm_args
+    parse_opts -t: __sm_title= -m: __sm_yourchoice= -d: __sm_default= @ __sm_args -- "$@" &&
+        set -- "${__sm_args[@]}" || ewarn "$__sm_args"
+
+    local __sm_option_var="${1:-option}" __sm_options_var="${2:-options}"
+    local __sm_option __sm_options
+    # indirect expansion of "array[*]" to check that the array is non-empty
+    __sm_options="$__sm_options_var[*]"
+    if [ -z "${!__sm_options}" ]; then
+        NULIB_OENC="$NULIB__UTF8" eerror "Le tableau $__sm_options_var doit être non vide"
+        return 1
+    fi
+    [ -z "$__sm_default" ] && __sm_default="${!__sm_option_var}"
+
+    eflush
+    base_array_copy __sm_options "$__sm_options_var"
+    local __sm_c=0 __sm_i __sm_choice
+    while true; do
+        if [ "$__sm_c" == "0" ]; then
+            # Display the menu; the default option is marked with '*'
+            [ -n "$__sm_title" ] && __eecho "=== $__sm_title ===" 1>&2
+            __sm_i=1
+            for __sm_option in "${__sm_options[@]}"; do
+                if [ "$__sm_option" == "$__sm_default" ]; then
+                    __eecho "$__sm_i*- $__sm_option" 1>&2
+                else
+                    __eecho "$__sm_i - $__sm_option" 1>&2
+                fi
+                let __sm_i=$__sm_i+1
+            done
+        fi
+
+        # Display the prompt
+        if [ -n "$__sm_yourchoice" ]; then
+            __eecho_ "$__sm_yourchoice" 1>&2
+        else
+            NULIB_OENC="$NULIB__UTF8" __eecho_ "Entrez le numéro de l'option choisie" 1>&2
+        fi
+        NULIB_OENC="$NULIB__UTF8" tooenc_ ": " 1>&2
+        uread __sm_choice
+
+        # Empty input selects the default value
+        if [ -z "$__sm_choice" -a -n "$__sm_default" ]; then
+            __sm_option="$__sm_default"
+            break
+        fi
+        # Validate the input: must be all digits and within range
+        if [ -n "$__sm_choice" -a -z "${__sm_choice//[0-9]/}" ]; then
+            if [ "$__sm_choice" -gt 0 -a "$__sm_choice" -le "${#__sm_options[*]}" ]; then
+                __sm_option="${__sm_options[$(($__sm_choice - 1))]}"
+                break
+            else
+                NULIB_OENC="$NULIB__UTF8" eerror "Numéro d'option incorrect"
+            fi
+        else
+            NULIB_OENC="$NULIB__UTF8" eerror "Vous devez saisir le numéro de l'option choisie"
+        fi
+
+        let __sm_c=$__sm_c+1
+        if [ "$__sm_c" -eq 5 ]; then
+            # print a blank line every 4 attempts, then redisplay the menu
+            NULIB_OENC="$NULIB__UTF8" tooenc "" 1>&2
+            __sm_c=0
+        fi
+    done
+    _setv "$__sm_option_var" "$__sm_option"
+}
+
+function actions_menu() {
+# Display a menu whose items are the values of the array $4(=options), plus a
+# list of actions taken from the array $3(=actions). The chosen option is
+# stored in the variable $2(=option). The chosen action is stored in the
+# variable $1(=action)
+# A choice is entered in the form [action]option_number
+# -t TITLE: set the menu title
+# -m OPT_YOUR_CHOICE: set the prompt for selecting the action and the option
+# -M ACT_YOUR_CHOICE: set the prompt used when no option is available. In that
+#    case, only void actions can be selected.
+# -e VOID_ACTION: declare an action as void, i.e. it does not need to be
+#    paired with an option. By default the last action falls in that category
+#    since it is the "quit" action
+# -d DEFAULT_ACTION: choose the default action. By default, the first action.
+# -q QUIT_ACTION: choose the "quit" action, which leaves the menu without a
+#    choice. By default, the last action.
+# -o DEFAULT_OPTION: choose the default option. By default, take the current
+#    value of the variable $2(=option)
+    local -a __am_action_descs __am_options __am_void_actions
+    local __am_tmp __am_select_action __am_select_option __am_title __am_optyc __am_actyc
+    local __am_default_action=auto __am_quit_action=auto
+    local __am_default_option=
+    local -a __am_args
+    parse_opts \
+        -t: __am_title= \
+        -m: __am_optyc= \
+        -M: __am_actyc= \
+        -e: __am_void_actions \
+        -d: __am_default_action= \
+        -q: __am_quit_action= \
+        -o: __am_default_option= \
+        @ __am_args -- "$@" && set -- "${__am_args[@]}" || { eerror "$__am_args"; return 1; }
+
+    # seed the current selection from the caller's variables (indirection)
+    __am_tmp="${1:-action}"; __am_select_action="${!__am_tmp}"
+    __am_tmp="${2:-option}"; __am_select_option="${!__am_tmp}"
+    [ -n "$__am_default_option" ] && __am_select_option="$__am_default_option"
+    base_array_copy __am_action_descs "${3:-actions}"
+    base_array_copy __am_options "${4:-options}"
+
+    eerror_unless [ ${#__am_action_descs[*]} -gt 0 ] "Vous devez spécifier le tableau des actions" || return
+    # __actions_menu communicates through the __am_* variables declared above
+    __actions_menu || return 1
+    _setv "${1:-action}" "$__am_select_action"
+    _setv "${2:-option}" "$__am_select_option"
+}
+
+function __actions_menu() {
+    # worker for actions_menu(): copy the __am_* inputs into local names
+    local title="$__am_title"
+    local optyc="$__am_optyc" actyc="$__am_actyc"
+    local default_action="$__am_default_action"
+    local quit_action="$__am_quit_action"
+    local select_action="$__am_select_action"
+    local select_option="$__am_select_option"
+    local -a action_descs options void_actions
+    base_array_copy action_descs __am_action_descs
+    base_array_copy options __am_options
+    base_array_copy void_actions __am_void_actions
+
+    # Compute the list of valid actions
+    local no_options
+    base_array_isempty options && no_options=1
+
+    local -a actions
+    local tmp action name
+    for tmp in "${action_descs[@]}"; do
+        # each descriptor has the form [letter]:label; the default letter is
+        # the first letter of the label, lower-cased
+        splitfsep2 "$tmp" : action name
+        [ -n "$action" ] || action="${name:0:1}"
+        action="$(strlower "$action")"
+        base_array_addu actions "$action"
+    done
+
+    # Compute the default action
+    if [ "$default_action" == auto ]; then
+        # if no default action was specified, take the first action
+        default_action="$select_action"
+        if [ -n "$default_action" ]; then
+            base_array_contains actions "$default_action" || default_action=
+        fi
+        [ -n "$default_action" ] || default_action="${actions[0]}"
+    fi
+    default_action="${default_action:0:1}"
+    default_action="$(strlower "$default_action")"
+
+    # Compute the default quit action
+    if [ "$quit_action" == auto ]; then
+        # if no quit action was specified, take the last action, provided
+        # there are at least 2 actions
+        if [ ${#actions[*]} -gt 1 ]; then
+            quit_action="${actions[@]:$((-1)):1}"
+            base_array_addu void_actions "$quit_action"
+        fi
+    fi
+    quit_action="${quit_action:0:1}"
+    quit_action="$(strlower "$quit_action")"
+
+    # Compute the action line to display
+    local action_title
+    for tmp in "${action_descs[@]}"; do
+        splitfsep2 "$tmp" : action name
+        [ -n "$action" ] || action="${name:0:1}"
+        [ -n "$name" ] || name="$action"
+        action="$(strlower "$action")"
+        if [ -n "$no_options" ]; then
+            # without options, only void actions remain selectable
+            if ! base_array_contains void_actions "$action"; then
+                base_array_del actions "$action"
+                continue
+            fi
+        fi
+        # the default action is marked with '*'
+        [ "$action" == "$default_action" ] && name="$name*"
+        action_title="${action_title:+$action_title/}$name"
+    done
+    if [ -n "$default_action" ]; then
+        # if the default action is invalid, then there is no default action
+        base_array_contains actions "$default_action" || default_action=
+    fi
+    if [ -n "$quit_action" ]; then
+        # if the quit action is invalid, then there is no quit action
+        base_array_contains actions "$quit_action" || quit_action=
+    fi
+
+    # Menu flavour: actions-only, or actions plus numbered options
+    if [ -n "$no_options" ]; then
+        if base_array_isempty void_actions; then
+            eerror "Aucune option n'est définie. Il faut définir le tableau des actions vides"
+            return 1
+        fi
+        __void_actions_menu
+    else
+        __options_actions_menu
+    fi
+}
+
+function __void_actions_menu() {
+    # menu shown when there are no options: only (void) actions can be
+    # chosen. Uses the locals of __actions_menu and reports the result
+    # through __am_select_action / __am_select_option.
+    eflush
+    local c=0 choice
+    while true; do
+        if [ $c -eq 0 ]; then
+            [ -n "$title" ] && __etitle "$title" 1>&2
+            __eecho_ "=== Actions disponibles: " 1>&2
+            tooenc "$action_title" 1>&2
+        fi
+        if [ -n "$actyc" ]; then
+            __eecho_ "$actyc" 1>&2
+        elif [ -n "$optyc" ]; then
+            __eecho_ "$optyc" 1>&2
+        else
+            __eecho_ "Entrez l'action à effectuer" 1>&2
+        fi
+        tooenc_ ": " 1>&2
+        uread choice
+        # empty input selects the default action
+        if [ -z "$choice" -a -n "$default_action" ]; then
+            select_action="$default_action"
+            break
+        fi
+
+        # validate the input: first letter, lower-cased, must be known
+        choice="${choice:0:1}"
+        choice="$(strlower "$choice")"
+        if base_array_contains actions "$choice"; then
+            select_action="$choice"
+            break
+        elif [ -n "$choice" ]; then
+            eerror "$choice: action incorrecte"
+        else
+            eerror "vous devez saisir l'action à effectuer"
+        fi
+        let c=$c+1
+        if [ $c -eq 5 ]; then
+            # print a blank line every 4 attempts, then redisplay the menu
+            tooenc "" 1>&2
+            c=0
+        fi
+    done
+    __am_select_action="$select_action"
+    __am_select_option=
+}
+
+function __options_actions_menu() {
+    # menu shown when options exist; a choice has the form
+    # [action]option_number. Uses the locals of __actions_menu and reports
+    # the result through __am_select_action / __am_select_option.
+    # NOTE(review): 'option' is declared twice in the local list below, and
+    # 'i' is used without being declared local
+    local c=0 option choice action option
+    while true; do
+        if [ $c -eq 0 ]; then
+            [ -n "$title" ] && __etitle "$title" 1>&2
+            i=1
+            for option in "${options[@]}"; do
+                # the currently selected option is marked with '*'
+                if [ "$option" == "$select_option" ]; then
+                    tooenc "$i*- $option" 1>&2
+                else
+                    tooenc "$i - $option" 1>&2
+                fi
+                let i=$i+1
+            done
+            __estepn_ "Actions disponibles: " 1>&2
+            tooenc "$action_title" 1>&2
+        fi
+        if [ -n "$optyc" ]; then
+            __eecho_ "$optyc" 1>&2
+        else
+            __eecho_ "Entrez l'action et le numéro de l'option choisie" 1>&2
+        fi
+        tooenc_ ": " 1>&2
+        uread choice
+
+        # empty input selects the default action (with the current option
+        # when the action requires one)
+        if [ -z "$choice" -a -n "$default_action" ]; then
+            action="$default_action"
+            if base_array_contains void_actions "$action"; then
+                select_action="$action"
+                select_option=
+                break
+            elif [ -n "$select_option" ]; then
+                select_action="$action"
+                break
+            fi
+        fi
+        action="${choice:0:1}"
+        action="$(strlower "$action")"
+        if base_array_contains actions "$action"; then
+            # the input starts with a valid action code :-)
+            if base_array_contains void_actions "$action"; then
+                select_action="$action"
+                select_option=
+                break
+            else
+                # the rest of the input is the option number
+                option="${choice:1}"
+                option="${option// /}"
+                if [ -z "$option" -a -n "$select_option" ]; then
+                    select_action="$action"
+                    break
+                elif [ -z "$option" ]; then
+                    eerror "vous devez saisir le numéro de l'option"
+                elif isnum "$option"; then
+                    if [ $option -gt 0 -a $option -le ${#options[*]} ]; then
+                        select_action="$action"
+                        select_option="${options[$(($option - 1))]}"
+                        break
+                    fi
+                else
+                    eerror "$option: numéro d'option incorrecte"
+                fi
+            fi
+        elif isnum "$choice"; then
+            # only an option number was given: pair it with the default action
+            action="$default_action"
+            if [ -n "$action" ]; then
+                if base_array_contains void_actions "$action"; then
+                    select_action="$action"
+                    select_option=
+                    break
+                else
+                    option="${choice// /}"
+                    if [ -z "$option" ]; then
+                        eerror "vous devez saisir le numéro de l'option"
+                    elif isnum "$option"; then
+                        if [ $option -gt 0 -a $option -le ${#options[*]} ]; then
+                            select_action="$action"
+                            select_option="${options[$(($option - 1))]}"
+                            break
+                        fi
+                    else
+                        eerror "$option: numéro d'option incorrecte"
+                    fi
+                fi
+            else
+                eerror "Vous devez spécifier l'action à effectuer"
+            fi
+        elif [ -n "$choice" ]; then
+            eerror "$choice: action et/ou option incorrecte"
+        else
+            eerror "vous devez saisir l'action à effectuer"
+        fi
+        let c=$c+1
+        if [ $c -eq 5 ]; then
+            # print a blank line every 4 attempts, then redisplay the menu
+            tooenc "" 1>&2
+            c=0
+        fi
+    done
+    __am_select_action="$select_action"
+    __am_select_option="$select_option"
+}
diff --git a/lib/nulib/bash/base.path b/lib/nulib/bash/base.path
new file mode 100644
index 0000000..0965347
--- /dev/null
+++ b/lib/nulib/bash/base.path
@@ -0,0 +1,158 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.path base_ "Fonctions de base: gestion des chemins et des fichiers"
+require: base.core
+
+function: base_in_path "tester l'existence d'un programme dans le PATH"
+# Test whether program $1 exists in the PATH and is executable.
+# NOTE(review): 'command -v' would be the portable idiom, but this line is
+# part of a patch and must stay as-is.
+function base_in_path() {
+    [ -n "$1" -a -x "$(which "$1" 2>/dev/null)" ]
+}
+
+function: base_delpath "supprimer le chemin \$1 de \$2(=PATH)"
+# Remove path $1 from the ':'-separated list held in the variable named
+# $2 (default PATH). _qdir is $1 with '/' escaped for use in the //
+# pattern-replacement; the eval strips $1 from the front, the back, the
+# middle, and finally empties the variable if it contained only $1.
+function base_delpath() {
+    local _qdir="${1//\//\\/}"
+    eval "export ${2:-PATH}; ${2:-PATH}"'="${'"${2:-PATH}"'#$1:}"; '"${2:-PATH}"'="${'"${2:-PATH}"'%:$1}"; '"${2:-PATH}"'="${'"${2:-PATH}"'//:$_qdir:/:}"; [ "$'"${2:-PATH}"'" == "$1" ] && '"${2:-PATH}"'='
+}
+
+function: base_addpath "Ajouter le chemin \$1 à la fin, dans \$2(=PATH), s'il n'y existe pas déjà"
+# Append path $1 to the list variable named $2 (default PATH) unless it is
+# already present at the front, the back, the middle, or as the sole entry.
+function base_addpath() {
+    local _qdir="${1//\//\\/}"
+    eval "export ${2:-PATH}; "'[ "${'"${2:-PATH}"'#$1:}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'%:$1}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'//:$_qdir:/:}" == "$'"${2:-PATH}"'" -a "$'"${2:-PATH}"'" != "$1" ] && '"${2:-PATH}"'="${'"${2:-PATH}"':+$'"${2:-PATH}"':}$1"'
+}
+
+function: base_inspathm "Ajouter le chemin \$1 au début, dans \$2(=PATH), s'il n'y existe pas déjà"
+# Prepend path $1 to the list variable named $2 (default PATH) unless it is
+# already present somewhere in the list.
+function base_inspathm() {
+    local _qdir="${1//\//\\/}"
+    eval "export ${2:-PATH}; "'[ "${'"${2:-PATH}"'#$1:}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'%:$1}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'//:$_qdir:/:}" == "$'"${2:-PATH}"'" -a "$'"${2:-PATH}"'" != "$1" ] && '"${2:-PATH}"'="$1${'"${2:-PATH}"':+:$'"${2:-PATH}"'}"'
+}
+
+function: base_inspath "S'assurer que le chemin \$1 est au début de \$2(=PATH)"
+# Move path $1 to the front of the list variable $2 (default PATH):
+# delete any existing occurrence, then prepend.
+function base_inspath() {
+    base_delpath "$@"
+    base_inspathm "$@"
+}
+
+function: base_push_cwd "enregistrer le répertoire courant dans la variable \$2(=cwd) et se placer dans le répertoire \$1"
+# Save the current directory in the variable named $2 (default cwd), then
+# cd into $1.
+function base_push_cwd() {
+    eval "${2:-cwd}"'="$(pwd)"'
+    cd "$1"
+}
+function: base_pop_cwd "se placer dans le répertoire \${!\$2}(=\$cwd) puis retourner le code d'erreur \$1(=0)"
+# cd back to the directory saved in the variable named $2 (default cwd),
+# then return status $1 (default 0).
+function base_pop_cwd() {
+    eval 'cd "$'"${2:-cwd}"'"'
+    return "${1:-0}"
+}
+
+################################################################################
+## fichiers temporaires
+
+function: base_mktempf "générer un fichier temporaire et retourner son nom"
+# Create a temporary file from template $1 (default $TMPDIR/tmp.XXXXXX) and
+# print its name on stdout.
+# NOTE(review): if TMPDIR is unset the default template becomes /tmp.XXXXXX —
+# presumably TMPDIR is always set by the library; confirm.
+function base_mktempf() {
+    mktemp "${1:-"$TMPDIR/tmp.XXXXXX"}"
+}
+
+function: base_mktempd "générer un répertoire temporaire et retourner son nom"
+# Create a temporary directory from template $1 (default $TMPDIR/tmp.XXXXXX)
+# and print its name on stdout.
+function base_mktempd() {
+    mktemp -d "${1:-"$TMPDIR/tmp.XXXXXX"}"
+}
+
+# Reset the global registry of files scheduled for automatic deletion.
+function base_ac__forgetall() { NULIB__AC_FILES=(); }
+base_ac__forgetall
+# Delete every registered file/directory, then clear the registry.
+function base_ac__trap() {
+    local file
+    for file in "${NULIB__AC_FILES[@]}"; do
+        [ -e "$file" ] && rm -rf "$file" 2>/dev/null
+    done
+    base_ac__forgetall
+}
+# run the cleanup on HUP(1), QUIT(3), TERM(15) and on normal EXIT
+trap base_ac__trap 1 3 15 EXIT
+
+function: base_autoclean "\
+Ajouter les fichiers spécifiés à la liste des fichiers à supprimer à la fin du
+programme"
+# Register $@ (empty arguments skipped) in NULIB__AC_FILES so they are
+# removed by base_ac__trap at program exit.
+function base_autoclean() {
+    local file
+    for file in "$@"; do
+        [ -n "$file" ] && NULIB__AC_FILES=("${NULIB__AC_FILES[@]}" "$file")
+    done
+}
+
+function: base_ac_cleanall "\
+Supprimer *tous* les fichiers temporaires gérés par autoclean tout de suite."
+# Remove *all* autoclean-managed temporary files immediately.
+function base_ac_cleanall() {
+    base_ac__trap
+}
+
+function: base_ac_clean "\
+Supprimer les fichier temporaires \$1..@ si et seulement s'ils ont été générés
+par base_ac_set_tmpfile() ou base_ac_set_tmpdir()"
+# Remove the files $1..@ only if they are in the autoclean registry, and
+# rebuild the registry without them.
+function base_ac_clean() {
+    local file acfile found
+    local -a acfiles
+    for acfile in "${NULIB__AC_FILES[@]}"; do
+        found=
+        for file in "$@"; do
+            if [ "$file" == "$acfile" ]; then
+                found=1
+                [ -e "$file" ] && rm -rf "$file" 2>/dev/null
+                break
+            fi
+        done
+        # keep registry entries that were not requested for deletion
+        [ -z "$found" ] && acfiles=("${acfiles[@]}" "$acfile")
+    done
+    NULIB__AC_FILES=("${acfiles[@]}")
+}
+
+function: base_ac_set_tmpfile "\
+Créer un fichier temporaire avec le motif \$2, l'ajouter à la liste des
+fichiers à supprimer en fin de programme, et mettre sa valeur dans la
+variable \$1
+
+En mode debug, si (\$5 est vide ou \${!5} est une valeur vraie), et si \$3 n'est
+pas vide, prendre ce fichier au lieu de générer un nouveau fichier temporaire.
+Si \$4==keep, ne pas écraser le fichier \$3 s'il existe."
+# Create a temp file from template $2, register it for autoclean and store
+# its name in the variable named $1. In debug mode, reuse the fixed file $3
+# instead; $4==keep preserves an existing $3, otherwise it is truncated.
+function base_ac_set_tmpfile() {
+    local se__d
+    if base_is_debug; then
+        # $5 names an opt-in flag variable; when absent, debug reuse is on
+        if [ -n "$5" ]; then
+            is_yes "${!5}" && se__d=1
+        else
+            se__d=1
+        fi
+    fi
+    if [ -n "$se__d" -a -n "$3" ]; then
+        _setv "$1" "$3"
+        # truncate unless the file exists and $4 says to keep it
+        [ -f "$3" -a "$4" == keep ] || >"$3"
+    else
+        local se__t="$(base_mktempf "$2")"
+        base_autoclean "$se__t"
+        _setv "$1" "$se__t"
+    fi
+}
+
+function: base_ac_set_tmpdir "\
+Créer un répertoire temporaire avec le motif \$2, l'ajouter à la liste des
+fichiers à supprimer en fin de programme, et mettre sa valeur dans la
+variable \$1
+
+En mode debug, si (\$4 est vide ou \${!4} est une valeur vraie), et si \$3 n'est
+pas vide, prendre ce nom de répertoire au lieu de créer un nouveau répertoire
+temporaire"
+# Create a temp directory from template $2, register it for autoclean and
+# store its name in the variable named $1. In debug mode, reuse the fixed
+# directory $3 (created with mkdir -p) instead.
+function base_ac_set_tmpdir() {
+    local sr__d
+    if base_is_debug; then
+        # $4 names an opt-in flag variable; when absent, debug reuse is on
+        if [ -n "$4" ]; then
+            is_yes "${!4}" && sr__d=1
+        else
+            sr__d=1
+        fi
+    fi
+    if [ -n "$sr__d" -a -n "$3" ]; then
+        _setv "$1" "$3"
+        mkdir -p "$3"
+    else
+        local sr__t="$(base_mktempd "$2")"
+        base_autoclean "$sr__t"
+        _setv "$1" "$sr__t"
+    fi
+}
diff --git a/lib/nulib/bash/base.split b/lib/nulib/bash/base.split
new file mode 100644
index 0000000..5e900e1
--- /dev/null
+++ b/lib/nulib/bash/base.split
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.split base_ "Fonctions de base: analyse et découpage de valeurs"
+require: base.arr
+
+function: base_splitfsep "\
+Découper \$1 de la forme first[SEPsecond] entre first, qui est placé dans la
+variable \$3(=first) et second, qui est placée dans la variable \$4(=second). \$2
+est la valeur SEP. Le découpage est faite sur la *première* occurence de SEP."
+# Split $1 ("first[SEPsecond]") at the *first* occurrence of separator $2
+# into $3(=first) and $4(=second); without SEP, everything goes to first.
+function base_splitfsep() {
+    if [[ "$1" == *"$2"* ]]; then
+        setv "${3:-first}" "${1%%$2*}"
+        setv "${4:-second}" "${1#*$2}"
+    else
+        setv "${3:-first}" "$1"
+        setv "${4:-second}"
+    fi
+}
+
+function: base_splitfsep2 "\
+Découper \$1 de la forme [firstSEP]second entre first, qui est placé dans la
+variable \$3(=first) et second, qui est placée dans la variable \$4(=second). \$2
+est la valeur SEP. Le découpage est faite sur la *première* occurence de SEP."
+# Split $1 ("[firstSEP]second") at the *first* occurrence of separator $2;
+# without SEP, everything goes to second.
+function base_splitfsep2() {
+    if [[ "$1" == *"$2"* ]]; then
+        setv "${3:-first}" "${1%%$2*}"
+        setv "${4:-second}" "${1#*$2}"
+    else
+        setv "${3:-first}"
+        setv "${4:-second}" "$1"
+    fi
+}
+
+function: base_splitlsep "\
+Découper \$1 de la forme first[SEPsecond] entre first, qui est placé dans la
+variable \$3(=first) et second, qui est placée dans la variable \$4(=second). \$2
+est la valeur SEP. Le découpage est faite sur la *dernière* occurence de SEP."
+# Split $1 ("first[SEPsecond]") at the *last* occurrence of separator $2;
+# without SEP, everything goes to first.
+function base_splitlsep() {
+    if [[ "$1" == *"$2"* ]]; then
+        setv "${3:-first}" "${1%$2*}"
+        setv "${4:-second}" "${1##*$2}"
+    else
+        setv "${3:-first}" "$1"
+        setv "${4:-second}"
+    fi
+}
+
+function: base_splitlsep2 "\
+Découper \$1 de la forme [firstSEP]second entre first, qui est placé dans la
+variable \$3(=first) et second, qui est placée dans la variable \$4(=second). \$2
+est la valeur SEP. Le découpage est faite sur la *dernière* occurence de SEP."
+# Split $1 ("[firstSEP]second") at the *last* occurrence of separator $2;
+# without SEP, everything goes to second.
+function base_splitlsep2() {
+    if [[ "$1" == *"$2"* ]]; then
+        setv "${3:-first}" "${1%$2*}"
+        setv "${4:-second}" "${1##*$2}"
+    else
+        setv "${3:-first}"
+        setv "${4:-second}" "$1"
+    fi
+}
+
+function: base_splitvar "\
+Découper \$1 de la forme name[=value] entre le nom, qui est placé dans la
+variable \$2(=name) et la valeur, qui est placée dans la variable \$3(=value)"
+# Split "name[=value]" into $2(=name) and $3(=value).
+function base_splitvar() {
+    splitfsep "$1" = "${2:-name}" "${3:-value}"
+}
+
+function: base_splitpath "\
+Découper \$1 de la forme [dir/]name entre le répertoire, qui est placé dans la
+variable \$2(=dir), et le nom du fichier, qui est placé dans la variable
+\$3(=name)"
+# Split "[dir/]name" into $2(=dir) and $3(=name), on the last '/'.
+function base_splitpath() {
+    splitlsep2 "$1" / "${2:-dir}" "${3:-name}"
+}
+
+function: base_splitname "\
+Découper \$1 de la forme basename[.ext] entre le nom de base du fichier, qui
+est placé dans la variable \$2(=basename) et l'extension, qui est placée dans
+la variable \$3(=ext)
+
+Attention, si \$1 est un chemin, le résultat risque d'être faussé. Par exemple,
+'splitname a.b/c' ne donne pas le résultat escompté."
+# Split "basename[.ext]" into $2(=basename) and $3(=ext), on the last '.'.
+# Beware: with a path argument like 'a.b/c' the result is misleading.
+function base_splitname() {
+    splitlsep "$1" . "${2:-basename}" "${3:-ext}"
+}
+
+function: base_splithost "\
+Découper \$1 de la forme hostname[.domain] entre le nom d'hôte, qui est placé
+dans la variable \$2(=hostname) et le domaine, qui est placée dans la variable
+\$3(=domain)"
+# Split "hostname[.domain]" into $2(=hostname) and $3(=domain), first '.'.
+function base_splithost() {
+    splitfsep "$1" . "${2:-hostname}" "${3:-domain}"
+}
+
+function: base_splituserhost "\
+Découper \$1 de la forme [user@]host entre le nom de l'utilisateur, qui est placé
+dans la variable \$2(=user) et le nom d'hôte, qui est placée dans la variable
+\$3(=host)"
+# Split "[user@]host" into $2(=user) and $3(=host), first '@'.
+function base_splituserhost() {
+    splitfsep2 "$1" @ "${2:-user}" "${3:-host}"
+}
+
+function: base_splitpair "\
+Découper \$1 de la forme first[:second] entre la première valeur, qui est placé
+dans la variable \$2(=src) et la deuxième valeur, qui est placée dans la variable
+\$3(=dest)"
+# Split "first[:second]" into $2(=src) and $3(=dest), first ':'.
+function base_splitpair() {
+    splitfsep "$1" : "${2:-src}" "${3:-dest}"
+}
+
+function: base_splitproxy "\
+Découper \$1 de la forme http://[user:password@]host[:port]/ entre les valeurs
+\$2(=host), \$3(=port), \$4(=user), \$5(=password)
+
+S'il n'est pas spécifié, port vaut 3128 par défaut"
+# Split a proxy URL "http://[user:password@]host[:port]/" into $2(=host),
+# $3(=port, default 3128), $4(=user), $5(=password).
+function base_splitproxy() {
+    local sy__tmp sy__host sy__port sy__creds sy__user sy__password
+
+    # strip the scheme, then split credentials, then host:port
+    sy__tmp="${1#http://}"
+    if [[ "$sy__tmp" == *@* ]]; then
+        sy__creds="${sy__tmp%%@*}"
+        sy__tmp="${sy__tmp#${sy__creds}@}"
+        splitpair "$sy__creds" sy__user sy__password
+    fi
+    # drop any trailing path component
+    sy__tmp="${sy__tmp%%/*}"
+    splitpair "$sy__tmp" sy__host sy__port
+    [ -n "$sy__port" ] || sy__port=3128
+
+    setv "${2:-host}" "$sy__host"
+    setv "${3:-port}" "$sy__port"
+    setv "${4:-user}" "$sy__user"
+    setv "${5:-password}" "$sy__password"
+}
+
+function: base_spliturl "\
+Découper \$1 de la forme scheme://[user:password@]host[:port]/path entre les
+valeurs \$2(=scheme), \$3(=user), \$4(=password), \$5(=host), \$6(=port), \$7(=path)
+
+S'il n'est pas spécifié, port vaut 80 pour http, 443 pour https, 21 pour ftp"
+# Split "scheme://[user:password@]host[:port]/path" into $2(=scheme),
+# $3(=user), $4(=password), $5(=host), $6(=port), $7(=path). Default port:
+# 80 for http, 443 for https, 21 for ftp.
+function base_spliturl() {
+    local sl__tmp sl__scheme sl__creds sl__user sl__password sl__host sl__port sl__path
+
+    sl__scheme="${1%%:*}"
+    sl__tmp="${1#${sl__scheme}://}"
+    # split off the path first, then credentials, then host:port
+    if [[ "$sl__tmp" == */* ]]; then
+        sl__path="${sl__tmp#*/}"
+        sl__tmp="${sl__tmp%%/*}"
+    fi
+    if [[ "$sl__tmp" == *@* ]]; then
+        sl__creds="${sl__tmp%%@*}"
+        sl__tmp="${sl__tmp#${sl__creds}@}"
+        splitpair "$sl__creds" sl__user sl__password
+    fi
+    splitpair "$sl__tmp" sl__host sl__port
+    if [ -z "$sl__port" ]; then
+        # well-known default ports per scheme
+        [ "$sl__scheme" == "http" ] && sl__port=80
+        [ "$sl__scheme" == "https" ] && sl__port=443
+        [ "$sl__scheme" == "ftp" ] && sl__port=21
+    fi
+
+    setv "${2:-scheme}" "$sl__scheme"
+    setv "${3:-user}" "$sl__user"
+    setv "${4:-password}" "$sl__password"
+    setv "${5:-host}" "$sl__host"
+    setv "${6:-port}" "$sl__port"
+    setv "${7:-path}" "$sl__path"
+}
+
+function: base_splitwcs "\
+Découper un nom de chemin \$1 entre la partie sans wildcards, qui est placée dans
+la variables \$2(=basedir), et la partie avec wildcards, qui est placée dans la
+variable \$3(=filespec)"
+# Split pathname $1 into its wildcard-free prefix ($2=basedir) and the rest
+# starting at the first component containing a wildcard ($3=filespec).
+function base_splitwcs() {
+    local ss__p="$1"
+    local ss__dd="${2:-basedir}" ss__df="${3:-filespec}" ss__part ss__d ss__f
+    local -a ss__parts
+    base_array_split ss__parts "$ss__p" "/"
+    for ss__part in "${ss__parts[@]}"; do
+        # once a component contains * or ?, every later component belongs
+        # to filespec as well
+        if [[ "$ss__part" == *\** ]] || [[ "$ss__part" == *\?* ]] || [ -n "$ss__f" ]; then
+            ss__f="${ss__f:+$ss__f/}$ss__part"
+        else
+            ss__d="${ss__d:+$ss__d/}$ss__part"
+        fi
+    done
+    # restore the leading '/' of an absolute path
+    [ "${ss__p#/}" != "$ss__p" ] && ss__d="/$ss__d"
+    _setv "$ss__dd" "$ss__d"
+    _setv "$ss__df" "$ss__f"
+}
diff --git a/lib/nulib/bash/base.str b/lib/nulib/bash/base.str
new file mode 100644
index 0000000..7200a3c
--- /dev/null
+++ b/lib/nulib/bash/base.str
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: base.str base_ "Fonctions de base: gestion des valeurs chaines"
+
+function: base_strmid "Afficher la plage \$1 de la valeur \$2..*
+
+La plage peut être d'une des formes 'start', '[start]:length'. Si start est
+négatif, le compte est effectué à partir de la fin de la chaine. Si length est
+négatif, il est rajouté à la longueur de la chaine à partir de start"
+# Print the range $1 of the value $2..*. Forms: 'start' or '[start]:length';
+# a negative start counts from the end, a negative length is added to the
+# remaining length measured from start.
+function base_strmid() {
+    local range="$1"; shift
+    local str="$*"
+    # a negative length must be normalized before using ${str:start:length}
+    if [[ "$range" == *:-* ]]; then
+        local max=${#str}
+        [ $max -eq 0 ] && return
+        local start="${range%%:*}"
+        [ -n "$start" ] || start=0
+        # fold a negative start back into the string
+        while [ "$start" -lt 0 ]; do
+            start=$(($max$start))
+        done
+        max=$(($max-$start))
+        local length="${range#*:}"
+        while [ "$length" -lt 0 ]; do
+            length=$(($max$length))
+        done
+        range="$start:$length"
+    fi
+    # the space before $range protects a leading '-' (negative start)
+    eval 'echo "${str:'" $range"'}"'
+}
+
+function: base_strrepl "Remplacer dans la valeur \$3..* le motif \$1 par la chaine \$2
+
+\$1 peut commencer par l'un des caractères /, #, % pour indiquer le type de recherche"
+# Replace pattern $1 by string $2 in the value $3..*. $1 may start with
+# '/' (replace all), '#' (anchor at start) or '%' (anchor at end).
+function base_strrepl() {
+    local pattern="$1"; shift
+    local repl="$1"; shift
+    local str="$*"
+    # build the ${str/.../...} expansion with the requested search mode
+    local cmd='echo "${str/'
+    if [ "${pattern#/}" != "$pattern" ]; then
+        pattern="${pattern#/}"
+        cmd="$cmd/"
+    elif [ "${pattern#\#}" != "$pattern" ]; then
+        pattern="${pattern#\#}"
+        cmd="$cmd#"
+    elif [ "${pattern#%}" != "$pattern" ]; then
+        pattern="${pattern#%}"
+        cmd="$cmd%"
+    fi
+    cmd="$cmd"'$pattern/$repl}"'
+    eval "$cmd"
+}
+
+function: base_strlcomp "transformer dans le flux en entrée en UTF-8 certains caractères en leur équivalent transformable en latin1.
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Map UTF-8 characters on stdin (typographic dashes/quotes, NBSP variants,
+# ligatures, combining accents) to latin1-compatible equivalents. With
+# arguments, $* is used as the input stream instead of stdin.
+# The sed program below is a runtime string and must not be edited.
+function base_strlcomp() {
+    if [ $# -gt 0 ]; then base_strlcomp <<<"$*"
+    else LANG=fr_FR.UTF-8 sed $'
+s/[\xE2\x80\x90\xE2\x80\x91\xE2\x80\x92\xE2\x80\x93\xE2\x80\x94\xE2\x80\x95]/-/g
+s/[‘’]/\x27/g
+s/[«»“”]/"/g
+s/[\xC2\xA0\xE2\x80\x87\xE2\x80\xAF\xE2\x81\xA0]/ /g
+s/[œ]/oe/g
+s/[Œ]/OE/g
+s/[æ]/ae/g
+s/[Æ]/AE/g
+s/a\xCC\x80/à/g
+s/e\xCC\x81/é/g; s/e\xCC\x80/è/g; s/e\xCC\x82/ê/g; s/e\xCC\x88/ë/g
+s/i\xCC\x88/ï/g; s/i\xCC\x82/î/g
+s/o\xCC\x82/ô/g; s/o\xCC\x88/ö/g
+s/u\xCC\x88/ü/g; s/u\xCC\x82/û/g
+s/c\xCC\xA7/ç/g
+s/A\xCC\x80/À/g
+s/E\xCC\x81/É/g; s/E\xCC\x80/È/g; s/E\xCC\x82/Ê/g; s/E\xCC\x88/Ë/g
+s/I\xCC\x88/Ï/g; s/I\xCC\x82/Î/g
+s/O\xCC\x82/Ô/g; s/O\xCC\x88/Ö/g
+s/U\xCC\x88/Ü/g; s/U\xCC\x82/Û/g
+s/C\xCC\xA7/Ç/g
+'
+    fi
+}
+
+function: base_strnacc "supprimer les accents dans le flux en entrée en UTF-8
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Strip French accents from the UTF-8 input stream. With arguments, $* is
+# used as the input stream instead of stdin.
+function base_strnacc() {
+    if [ $# -gt 0 ]; then base_strnacc <<<"$*"
+    else LANG=fr_FR.UTF-8 sed '
+s/[à]/a/g
+s/[éèêë]/e/g
+s/[ïî]/i/g
+s/[ôö]/o/g
+s/[üû]/u/g
+s/[ç]/c/g
+s/[À]/A/g
+s/[ÉÈÊË]/E/g
+s/[ÏÎ]/I/g
+s/[ÔÖ]/O/g
+s/[ÜÛ]/U/g
+s/[Ç]/C/g
+'
+    fi
+}
+
+function: base_stripnl "Supprimer dans le flux en entrée les caractères de fin de ligne
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Delete CR and LF characters from the input stream. With arguments, $* is
+# used as the input stream instead of stdin.
+function base_stripnl() {
+    if [ $# -gt 0 ]; then base_stripnl <<<"$*"
+    else tr -d '\r\n'
+    fi
+}
+
+function: base_nl2lf "transformer dans le flux en entrée les fins de ligne en LF
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Normalize CR/CRLF/LF line endings in the input stream to LF. With
+# arguments, $* is used as the input stream instead of stdin.
+function base_nl2lf() {
+    if [ $# -gt 0 ]; then base_nl2lf <<<"$*"
+    else lawk 'BEGIN {RS="\r|\r\n|\n"} {print}'
+    fi
+}
+
+function: base_nl2crlf "transformer dans le flux en entrée les fins de ligne en CRLF
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Normalize line endings in the input stream to CRLF. With arguments, $* is
+# used as the input stream instead of stdin.
+function base_nl2crlf() {
+    if [ $# -gt 0 ]; then base_nl2crlf <<<"$*"
+    else lawk 'BEGIN {RS="\r|\r\n|\n"} {print $0 "\r"}'
+    fi
+}
+
+function: base_nl2cr "transformer dans le flux en entrée les fins de ligne en CR
+
+si cette fonction est appelée avec des arguments, prendre \$* comme valeur du flux en entrée."
+# Normalize line endings in the input stream to CR (no trailing LF, ORS="").
+# With arguments, $* is used as the input stream instead of stdin.
+function base_nl2cr() {
+    if [ $# -gt 0 ]; then base_nl2cr <<<"$*"
+    else lawk 'BEGIN {RS="\r|\r\n|\n"; ORS=""} {print $0 "\r"}'
+    fi
+}
diff --git a/lib/nulib/bash/git b/lib/nulib/bash/git
new file mode 100644
index 0000000..99c213b
--- /dev/null
+++ b/lib/nulib/bash/git
@@ -0,0 +1,704 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+##@require nulib.sh
+##@require base
+module: git "" "Fonctions pour faciliter l'utilisation de git"
+require: nulib.sh base
+
+function: git_geturl ""
+# Print the URL of the remote "origin" of the current repository.
+function git_geturl() {
+    git config --get remote.origin.url
+}
+
+function: git_have_annex ""
+# True when the current repository has a git-annex (annex.uuid is set).
+function git_have_annex() {
+    [ -n "$(git config --get annex.uuid)" ]
+}
+
+# Public helper functions exposed by this module.
+NULIB_GIT_FUNCTIONS=(
+    git_check_gitvcs git_ensure_gitvcs
+    git_list_branches git_list_rbranches
+    git_have_branch git_have_rbranch
+    git_get_branch git_is_branch
+    git_have_remote git_track_branch
+    git_check_cleancheckout git_ensure_cleancheckout
+    git_is_ancestor git_should_ff git_should_push
+    git_is_merged
+)
+# Short "alias:function" mapping for the same helpers.
+NULIB_GIT_FUNCTIONS_MAP=(
+    cg:git_check_gitvcs eg:git_ensure_gitvcs
+    lbs:git_list_branches rbs:git_list_rbranches
+    hlb:git_have_branch hrb:git_have_rbranch
+    gb:git_get_branch ib:git_is_branch
+    hr:git_have_remote tb:git_track_branch
+    cc:git_check_cleancheckout ec:git_ensure_cleancheckout
+    ia:git_is_ancestor sff:git_should_ff spu:git_should_push
+    im:git_is_merged
+)
+
+function: git_check_gitvcs ""
+# True when the current directory is inside a git repository.
+function git_check_gitvcs() {
+    git rev-parse --show-toplevel >&/dev/null
+}
+
+function: git_ensure_gitvcs ""
+# Like git_check_gitvcs, but fail loudly (edie) when this is not a git repo.
+function git_ensure_gitvcs() {
+    git_check_gitvcs || edie "Ce n'est pas un dépôt git" || return
+}
+
+function: git_list_branches ""
+# List local branch names (short form), sorted.
+function git_list_branches() {
+    git for-each-ref refs/heads/ --format='%(refname:short)' | csort
+}
+
+function: git_list_rbranches ""
+# List branch names of remote ${1:-origin} (short form, remote prefix kept), sorted.
+function git_list_rbranches() {
+    git for-each-ref "refs/remotes/${1:-origin}/" --format='%(refname:short)' | csort
+}
+
+function: git_list_pbranches "lister les branches locales et celles qui existent dans l'origine \$1(=origin) et qui pourraient devenir une branche locale avec la commande git checkout -b"
+# List local branches plus branches of remote ${1:-origin} that could become
+# local ones via "git checkout -b". The cut strips the "<remote>/" prefix from
+# remote refs; HEAD is filtered out; the merged list is sorted uniquely.
+function git_list_pbranches() {
+    local prefix="${1:-origin}/"
+    {
+        git for-each-ref refs/heads/ --format='%(refname:short)'
+        git for-each-ref "refs/remotes/$prefix" --format='%(refname:short)' | grep -F "$prefix" | cut -c $((${#prefix} + 1))-
+    } | grep -vF HEAD | csort -u
+}
+
+function: git_have_branch ""
+# True when local branch $1 exists (exact name match).
+# Fix: the previous "grep -qF" matched substrings, so e.g. git_have_branch
+# master was true when only "master-old" existed; -x anchors to the whole line.
+function git_have_branch() {
+    git_list_branches | grep -qxF "$1"
+}
+
+function: git_have_rbranch ""
+# True when branch $1 exists in remote ${2:-origin} (exact name match,
+# same -x fix as git_have_branch).
+function git_have_rbranch() {
+    git_list_rbranches "${2:-origin}" | grep -qxF "$1"
+}
+
+function: git_get_branch ""
+# Print the short name of the currently checked-out branch; silent on error
+# (e.g. outside a repository).
+function git_get_branch() {
+    git rev-parse --abbrev-ref HEAD 2>/dev/null
+}
+
+function: git_get_branch_remote ""
+# Print the configured remote (branch.<name>.remote) of branch $1, defaulting
+# to the current branch.
+function git_get_branch_remote() {
+    local branch="$1"
+    [ -n "$branch" ] || branch="$(git_get_branch)"
+    [ -n "$branch" ] || return
+    git config --get "branch.$branch.remote"
+}
+
+function: git_get_branch_merge ""
+# Print the upstream ref (branch.<name>.merge) of branch $1, defaulting to the
+# current branch.
+function git_get_branch_merge() {
+    local branch="$1"
+    [ -n "$branch" ] || branch="$(git_get_branch)"
+    [ -n "$branch" ] || return
+    git config --get "branch.$branch.merge"
+}
+
+function: git_get_branch_rbranch ""
+# Print the remote-tracking ref (refs/remotes/<remote>/<branch>) of branch $1
+# (default: current branch) for remote $2 (default: the branch's configured remote).
+function git_get_branch_rbranch() {
+    local branch="$1" remote="$2" merge
+    [ -n "$branch" ] || branch="$(git_get_branch)"
+    [ -n "$branch" ] || return
+    [ -n "$remote" ] || remote="$(git_get_branch_remote "$branch")"
+    [ -n "$remote" ] || return
+    merge="$(git_get_branch_merge "$branch")"
+    [ -n "$merge" ] || return
+    echo "refs/remotes/$remote/${merge#refs/heads/}"
+}
+
+function: git_is_branch ""
+# True when the current branch is ${1:-master}.
+function git_is_branch() {
+    [ "$(git_get_branch)" == "${1:-master}" ]
+}
+
+function: git_have_remote ""
+# True when remote ${1:-origin} has a configured URL.
+function git_have_remote() {
+    [ -n "$(git config --get remote.${1:-origin}.url)" ]
+}
+
+function: git_track_branch ""
+# Make local branch $1 track the branch of the same name in remote ${2:-origin}:
+# set the upstream if both exist, create a tracking branch if only the remote
+# one exists, or push -u if only the local one exists. No-op when tracking is
+# already configured or the remote is missing.
+function git_track_branch() {
+    local branch="$1" origin="${2:-origin}"
+    [ -n "$branch" ] || return
+    git_have_remote "$origin" || return
+    [ "$(git config --get branch.$branch.remote)" == "$origin" ] && return
+    if git_have_rbranch "$branch" "$origin"; then
+        if git_have_branch "$branch"; then
+            git branch -u "$origin/$branch" "$branch"
+        else
+            git branch -t "$branch" "$origin/$branch"
+        fi
+    elif git_have_branch "$branch"; then
+        git push -u "$origin" "$branch" || return
+    fi
+}
+
+function: git_ensure_branch "
+@return 0 si la branche a été créée, 1 si elle existait déjà, 2 en cas d'erreur"
+# Ensure local branch $1 exists, creating it from ${2:-master} or from the
+# remote branch of the same name in ${3:-origin} when there is one.
+function git_ensure_branch() {
+    local branch="$1" source="${2:-master}" origin="${3:-origin}"
+    [ -n "$branch" ] || return 2
+    git_have_branch "$branch" && return 1
+    if git_have_rbranch "$branch" "$origin"; then
+        # a branch with the same name exists in the remote: make a local copy
+        # that tracks it
+        git branch -t "$branch" "$origin/$branch" || return 2
+    else
+        # create a new branch with the requested name
+        git_have_branch "$source" || return 2
+        git branch "$branch" "$source" || return 2
+        if [ -z "$NULIB_GIT_OFFLINE" ]; then
+            git_have_remote "$origin" && git_track_branch "$branch" "$origin"
+        fi
+    fi
+    return 0
+}
+
+function: git_check_cleancheckout "vérifier qu'il n'y a pas de modification locales dans le dépôt correspondant au répertoire courant."
+# True when "git status --porcelain" reports nothing: no local modifications,
+# no untracked files.
+function git_check_cleancheckout() {
+    [ -z "$(git status --porcelain 2>/dev/null)" ]
+}
+
+function: git_ensure_cleancheckout ""
+# Like git_check_cleancheckout, but fail loudly (edie) on a dirty checkout.
+function git_ensure_cleancheckout() {
+    git_check_cleancheckout ||
+        edie "Vous avez des modifications locales. Enregistrez ces modifications avant de continuer" || return
+}
+
+# Shared prologue of the fast-forward helpers below: resolve branch $1 and
+# source ref ${2:-refs/remotes/${3:-origin}/$1} into commit ids, returned
+# through the caller's variables b and s (o holds the remote name).
+function git__init_ff() {
+    o="${3:-origin}"
+    b="$1" s="${2:-refs/remotes/$o/$1}"
+    b="$(git rev-parse --verify --quiet "$b")" || return 1
+    s="$(git rev-parse --verify --quiet "$s")" || return 1
+    return 0
+}
+# True when commit $1 can be fast-forwarded to commit $2, i.e. $1 is the
+# merge base of the two.
+function git__can_ff() {
+    [ "$1" == "$(git merge-base "$1" "$2")" ]
+}
+
+function: git_is_ancestor "vérifier que la branche \$1 est un ancêtre direct de la branche \$2, qui vaut par défaut refs/remotes/\${3:-origin}/\$1
+note: cette fonction retourne vrai si \$1 et \$2 identifient le même commit"
+# True when $1 is an ancestor of (or identical to) $2.
+function git_is_ancestor() {
+    local o b s; git__init_ff "$@" || return
+    git__can_ff "$b" "$s"
+}
+
+function: git_should_ff "vérifier si la branche \$1 devrait être fast-forwardée à partir de la branche d'origine \$2, qui vaut par défaut refs/remotes/\${3:-origin}/\$1
+note: cette fonction est similaire à git_is_ancestor(), mais retourne false si \$1 et \$2 identifient le même commit"
+# Like git_is_ancestor, but false when $1 and $2 point to the same commit.
+function git_should_ff() {
+    local o b s; git__init_ff "$@" || return
+    [ "$b" != "$s" ] || return 1
+    git__can_ff "$b" "$s"
+}
+
+function: git_should_push "vérifier si la branche \$1 devrait être poussée vers la branche de même nom dans l'origine \$2(=origin), parce que l'origin peut-être fast-forwardée à partir de cette branche."
+# True when the copy of branch $1 in remote ${2:-origin} lags behind the local
+# branch, i.e. there is something to push.
+function git_should_push() {
+    git_should_ff "refs/remotes/${2:-origin}/$1" "$1"
+}
+
+function: git_fast_forward "vérifier que la branche courante est bien \$1, puis tester s'il faut la fast-forwarder à partir de la branche d'origine \$2, puis le faire si c'est nécessaire. la branche d'origine \$2 vaut par défaut refs/remotes/origin/\$1"
+# If $1 is the currently checked-out branch and can be fast-forwarded from $2
+# (default refs/remotes/origin/$1), perform the fast-forward merge.
+function git_fast_forward() {
+    local o b s; git__init_ff "$@" || return
+    [ "$b" != "$s" ] || return 1
+    local head="$(git rev-parse HEAD)"
+    [ "$head" == "$b" ] || return 1
+    git__can_ff "$b" "$s" || return 1
+    git merge --ff-only "$s"
+}
+
+function: git_is_merged "vérifier que les branches \$1 et \$2 ont un ancêtre commun, et que la branche \$1 a été complètement fusionnée dans la branche destination \$2"
+# True when $1 and $2 share a merge base and no commit of $1 is missing from
+# $2 (rev-list $2..$1 is empty).
+function git_is_merged() {
+    local b="$1" d="$2"
+    b="$(git rev-parse --verify --quiet "$b")" || return 1
+    d="$(git rev-parse --verify --quiet "$d")" || return 1
+    [ -n "$(git merge-base "$b" "$d")" ] || return 1
+    [ -z "$(git rev-list "$d..$b")" ]
+}
+
+################################################################################
+# git annex
+
+NULIB_GIT_SSH_WRAPPER=
+function: git_annex_use_ssh_wrapper ""
+# Put nulib's ssh wrapper first in PATH so git/git-annex use it; the original
+# PATH and GIT_SSH are saved in exported NULIB_GIT_FORCE_* variables for the
+# wrapper. Idempotent: no-op once NULIB_GIT_SSH_WRAPPER is set.
+function git_annex_use_ssh_wrapper() {
+    [ -n "$NULIB_GIT_SSH_WRAPPER" ] && return
+    NULIB_GIT_FORCE_PATH="$PATH"
+    NULIB_GIT_FORCE_SSH="${GIT_SSH:-ssh}"
+    export NULIB_GIT_FORCE_PATH NULIB_GIT_FORCE_SSH
+    base_delpath "$NULIBDIR/ssh-wrapper" NULIB_GIT_FORCE_PATH
+    base_inspath "$NULIBDIR/ssh-wrapper" PATH
+    NULIB_GIT_SSH_WRAPPER=1
+}
+
+function: git_annex_initial "sur le dépôt \$1 fraichement cloné, vérifier s'il faut faire git annex init. Si oui, l'initialiser avec le nom d'hôte, et récupérer tous les fichiers annexés
+@return 1 si une erreur s'est produite"
+# On freshly cloned repository $1, initialize git-annex (named after the local
+# host) and fetch/sync annexed files, but only when annexed symlinks are found
+# and annex.uuid is not configured yet. Bare repositories are skipped.
+function git_annex_initial() {
+    local repodir="${1:-.}"
+    [ -d "$repodir" ] || return 1
+    repodir="$(abspath "$repodir")"
+
+    local GIT_DIR GIT_WORK_TREE
+    [ "$(cd "$repodir"; git rev-parse --is-bare-repository)" == false ] || return 0
+    [ -n "$(GIT_DIR="$repodir/.git" git config --get annex.uuid)" ] && return 0
+
+    # at this point git-annex has not been configured yet;
+    # check whether annexed files exist (symlinks pointing into .git/annex)
+    local -a links
+    base_array_splitl links "$(find "$repodir" -type l)"
+    local link hasannex=
+    for link in "${links[@]}"; do
+        link="$(readlink "$link")"
+        if [ "${link#.git/annex/}" != "$link" ]; then
+            hasannex=1
+            break
+        elif [[ "$link" == */.git/annex/* ]]; then
+            hasannex=1
+            break
+        fi
+    done
+
+    if [ -n "$hasannex" ]; then
+        base_in_path git-annex || edie "Vous devez installer git-annex" || return
+        local cwd; base_push_cwd "$repodir" &&
+        git annex init "$MYHOSTNAME" &&
+        git annex get &&
+        git annex sync &&
+        base_pop_cwd || base_pop_cwd 1 || return
+    fi
+}
+
+################################################################################
+# Outils de haut niveau
+
+function: git_commit ""
+# Commit with sensible defaults. Options: -a/--all stage all modified files,
+# -A/--all-new run "git add -A" first, -c/--cached commit only the index,
+# -p/--push force a push afterwards, -l/--local never push. $1 is the commit
+# message (optional); remaining arguments are passed to git commit. In "auto"
+# mode the scope is inferred from the index and git_push --auto is used.
+function git_commit() {
+    local all=auto allnew push=auto nopush args
+    setyesval nopush "$NULIB_GIT_OFFLINE"
+    [ -n "$nopush" ] && push=
+    parse_opts + "${PRETTYOPTS[@]}" \
+        -a,--all all=1 \
+        -A,--all-new allnew=1 \
+        -c,--cached all= \
+        -p,--push push=1 \
+        -l,--local push= \
+        @ args -- "$@" && set -- "${args[@]}" || {
+        eerror "$args"
+        return 1
+    }
+
+    if [ -n "$allnew" ]; then
+        git add -A
+        all=
+    fi
+
+    local message="$1"; shift
+    local -a cmd
+    cmd=(git commit)
+    [ -n "$message" ] && cmd=("${cmd[@]}" -m "$message")
+    if [ "$all" == "auto" ]; then
+        # If files are specified on the command line, commit those.
+        if [ -z "$*" ]; then
+            # Otherwise, if there are staged files in the index, commit only
+            # those files; else commit every modified file (-a).
+            # The following pipeline is true (exit 0) when the index contains
+            # at least one staged file.
+            git status --porcelain 2>/dev/null | lawk '
+            BEGIN { ec = 1 }
+            substr($0, 1, 1) ~ /[^ ?]/ { ec = 0; exit }
+            END { exit ec }' ||
+            cmd=("${cmd[@]}" -a)
+        fi
+    else
+        [ -n "$all" ] && cmd=("${cmd[@]}" -a)
+    fi
+
+    if ! "${cmd[@]}" "$@"; then
+        [ "$push" == auto ] && return 1
+    fi
+    if [ "$push" == auto ]; then
+        git_push --auto || return
+    elif [ -n "$push" ]; then
+        git_push --force || return
+    fi
+    return 0
+}
+
+function: git_update ""
+# Update the local repository: fetch with prune, offer to delete local branches
+# whose remote counterpart disappeared, then fast-forward every local tracking
+# branch (restoring the original branch at the end). With -n/--no-autoff, just
+# run "git pull". Fixes: grammar of two user-facing messages ("where deleted"
+# -> "were deleted", "branch were updated" -> "branch was updated").
+function git_update() {
+    local args autoff=1
+    parse_opts + "${PRETTYOPTS[@]}" \
+        -n,--no-autoff autoff= \
+        @ args -- "$@" && set -- "${args[@]}" || {
+        eerror "$args"
+        return 1
+    }
+
+    if [ -z "$autoff" ]; then
+        git pull "$@"
+        return $?
+    fi
+
+    local branch orig_branch restore_branch remote rbranch pbranch
+    local -a branches prbranches crbranches dbranches
+
+    base_array_splitl prbranches "$(git_list_rbranches)"
+    git fetch -p "$@" || return
+    base_array_splitl crbranches "$(git_list_rbranches)"
+
+    # detect remote branches that were deleted upstream
+    for branch in "${prbranches[@]}"; do
+        if ! base_array_contains crbranches "$branch"; then
+            base_array_add dbranches "${branch#*/}"
+        fi
+    done
+    if [ ${#dbranches[*]} -gt 0 ]; then
+        eimportant "One or more distant branches were deleted"
+        for branch in "${dbranches[@]}"; do
+            if git_have_branch "$branch"; then
+                if ! ask_yesno "Do you want to delete local branch $branch?" X; then
+                    base_array_del dbranches "$branch"
+                fi
+            fi
+        done
+    fi
+    if [ ${#dbranches[*]} -gt 0 ]; then
+        base_array_splitl branches "$(git_list_branches)"
+        branch="$(git_get_branch)"
+        if base_array_contains dbranches "$branch"; then
+            # the current branch is slated for removal: switch to develop or
+            # master first
+            local swto
+            if [ -z "$swto" ] && base_array_contains branches develop && ! base_array_contains dbranches develop; then
+                swto=develop
+            fi
+            if [ -z "$swto" ] && base_array_contains branches master && ! base_array_contains dbranches master; then
+                swto=master
+            fi
+            if ! git_check_cleancheckout; then
+                echo "* There are uncommitted local changes. However current branch is slated for removal.
+Make your verifications then delete the local branches:
+    ${swto:+$(qvals git checkout "$swto")
+    }$(qvals git branch -D "${dbranches[@]}")"
+                return 1
+            fi
+            if [ -n "$swto" ]; then
+                git checkout -q "$swto"
+            else
+                echo "* Current branch is slated for removal but I don't know to which branch I should switch first.
+Make your choice then delete the local branches:
+    $(qvals git branch -D "${dbranches[@]}")"
+                return 1
+            fi
+        fi
+        for branch in "${dbranches[@]}"; do
+            git branch -D "$branch"
+        done
+    fi
+
+    # merge the upstream changes into the local branches
+    if ! git_check_cleancheckout; then
+        branch="$(git_get_branch)"
+        remote="$(git_get_branch_remote "$branch")"
+        rbranch="$(git_get_branch_rbranch "$branch" "$remote")"
+        pbranch="${rbranch#refs/remotes/}"
+        if git merge -q --ff-only "$rbranch"; then
+            echo "* There are uncommitted local changes: only CURRENT branch was updated"
+        fi
+        return 0
+    fi
+
+    orig_branch="$(git_get_branch)"
+    base_array_splitl branches "$(git_list_branches)"
+    for branch in "${branches[@]}"; do
+        remote="$(git_get_branch_remote "$branch")"
+        rbranch="$(git_get_branch_rbranch "$branch" "$remote")"
+        pbranch="${rbranch#refs/remotes/}"
+        [ -n "$remote" -a -n "$rbranch" ] || continue
+        if git_is_ancestor "$branch" "$rbranch"; then
+            if git_should_ff "$branch" "$rbranch"; then
+                echo "* Fast-forwarding $branch -> $pbranch"
+                git checkout -q "$branch"
+                git merge -q --ff-only "$rbranch"
+                restore_branch=1
+            fi
+        else
+            if [ "$branch" == "$orig_branch" ]; then
+                echo "* Cannot fast-forward CURRENT branch $branch from $pbranch
+Try to merge manually with: git merge $pbranch"
+            else
+                echo "* Cannot fast-forward local branch $branch from $pbranch
+You can merge manually with: git checkout $branch; git merge $pbranch"
+            fi
+        fi
+    done
+    [ -n "$restore_branch" ] && git checkout -q "$orig_branch"
+    return 0
+}
+
+function: git_push ""
+# Push with sensible defaults. Options: -a/--all push all branches then all
+# tags, -b/--branches only branches, -t/--tags only tags; --auto push just the
+# current tracking branch and stay silent when no remote is configured;
+# -f/--force push even when the current branch does not track origin;
+# -n/--no-annex skip the "git annex sync" path.
+function git_push() {
+    local all all_branches all_tags auto force args no_annex
+    parse_opts + "${PRETTYOPTS[@]}" \
+        -a,--all all=1 \
+        -b,--branches,--all-branches all_branches=1 \
+        -t,--tags,--all-tags all_tags=1 \
+        --auto auto=1 \
+        -f,--force force=1 \
+        -n,--no-annex no_annex=1 \
+        @ args -- "$@" && set -- "${args[@]}" || {
+        eerror "$args"
+        return 1
+    }
+
+    if [ -n "$all" ]; then
+        # push every branch and every tag
+        local r
+        git push --all "$@"; r=$?
+        if [ $r -eq 0 ]; then
+            git push --tags "$@"; r=$?
+        fi
+        return $r
+    elif [ -n "$all_branches" ]; then
+        # push every branch
+        git push --all "$@"
+        return $?
+    elif [ -n "$all_tags" ]; then
+        # push every tag
+        git push --tags "$@"
+        return $?
+    elif [ $# -gt 0 ]; then
+        # otherwise, when arguments are given, pass them to git unchanged
+        git push "$@"
+        return $?
+    elif git_have_annex; then
+        # when the repository has an annex, let git-annex do the
+        # synchronization, unless --no-annex was given or we are in
+        # automatic mode
+        if [ -z "$no_annex" -a -z "$auto" ]; then
+            git annex sync
+            return $?
+        fi
+    fi
+
+    # otherwise push to origin; check that the remote exists
+    [ -n "$(git config --get remote.origin.url)" ] || {
+        if [ -n "$auto" ]; then
+            # in automatic mode, a missing remote is not an error
+            return 0
+        else
+            eerror "Aucun remote origin n'est défini"
+            return 1
+        fi
+    }
+
+    # then compute the branch to push
+    local branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null)"
+    local origin="$(git config --get "branch.$branch.remote")"
+    if [ -n "$branch" -a "$origin" == origin ]; then
+        if [ -n "$auto" ]; then
+            # in automatic mode, push only the current branch
+            git push "$origin" "$branch" || return
+        else
+            # use the default configuration, which (as of debian squeeze)
+            # pushes all branches
+            git push || return
+        fi
+    elif [ -n "$force" ]; then
+        # use the default configuration, which (as of debian squeeze) pushes
+        # all branches
+        git push || return
+    fi
+    return 0
+}
+
+# Clone $1 into $2 (creating parent directories), check out the develop branch
+# when the origin has one (unless $3 is non-empty), then run git_annex_initial.
+function git__pclone() {
+    estep "$1 --> $(ppath "$2")"
+    mkdirof "$2" || return 1
+    git clone "$1" "$2" || return 1
+    if [ -z "$3" ]; then
+        (
+            cd "$2"
+            if git_have_rbranch develop; then
+                git checkout develop || exit 1
+            fi
+        ) || return 1
+    fi
+    git_annex_initial "$2" || return 1
+}
+# Query a gitolite server for its repository list: mode $1 selects http
+# (curl on $2/info?pattern) or ssh ("$2 info pattern").
+function git__gitolite_info() {
+    local mode="$1" urlbase="$2" pattern="$3"
+    case "$mode" in
+    http) curl -fs "$urlbase/info${pattern:+"?$pattern"}";;
+    ssh) ssh -q "$urlbase" info ${pattern:+"$pattern"} 2>/dev/null;;
+    esac
+}
+# Filter "gitolite info" output down to concrete repository names that start
+# with prefix $1 (skips the 2 header lines, uncreated and wildcard projects).
+function git__filter_repos() {
+    lawk -v prefix="$1" '
+NR <= 2 { next }
+{
+    # filtrer les projets qui ne sont pas encore créés
+    if (substr($0, 5, 2) == " C") next
+    repo = substr($0, 6)
+    # filtrer les projets de type wildcard
+    if (repo ~ /[\[\]\*]/) next
+    # enlever le prefixe
+    if (prefix != "" && substr(repo, 1, length(prefix)) != prefix) next
+    print repo
+}'
+}
+
+function: git_clone ""
+# Clone one repository, or with -r/--recursive every repository below a
+# gitolite base URL (http or ssh). -n/--no-clone only prints the commands
+# (dry run), -u/--update pulls existing checkouts, -m/--master skips the
+# automatic develop checkout.
+# NOTE(review): the working variables (no_clone, update, destdir, ...) are not
+# declared local and leak into the caller's scope -- TODO confirm intended.
+function git_clone() {
+    no_clone=
+    update=
+    nodevelop=
+    recursive=
+    parse_opts "${PRETTYOPTS[@]}" \
+        -n,--no-clone no_clone=1 \
+        -u,--update update=1 \
+        -m,--master nodevelop=1 \
+        -r,--recursive recursive=1 \
+        @ args -- "$@" && set -- "${args[@]}" || edie "$args" || return
+
+    if [ -n "$recursive" ]; then
+        repobase="$1"
+        [ -n "$repobase" ] || edie "Vous devez spécifier l'url de base des dépôts à cloner" || return
+        if [ "${repobase#http://}" != "$repobase" -o "${repobase#https://}" != "$repobase" ]; then
+            # http access
+            mode=http
+            splitfsep "$repobase" :// scheme hostuserpath
+            splitfsep "$hostuserpath" / host userpath
+            splitfsep "$userpath" / user basepath
+            [ -n "$host" -a -n "$user" ] || edie "Vous devez spécifier l'hôte e.g http://host/git/basepath" || return
+            urlbase="$scheme://$host/$user"
+        else
+            # ssh access
+            mode=ssh
+            splitfsep "$repobase" : userhost basepath
+            splituserhost "$userhost" user host
+            [ -n "$user" ] || user=git
+            [ -n "$host" ] || edie "Vous devez spécifier l'hôte" || return
+            urlbase="$user@$host"
+        fi
+        basepath="${basepath%/}"
+        destbase="${2:-.}"
+
+        git_annex_use_ssh_wrapper
+        prefix="${basepath:+$basepath/}"
+        base_array_splitl repos "$(set -o pipefail; git__gitolite_info "$mode" "$urlbase" "$prefix" | git__filter_repos "$prefix")" || edie || return
+        for repo in "${repos[@]}"; do
+            case "$mode" in
+            http) repourl="$urlbase/$repo";;
+            ssh) repourl="$urlbase:$repo";;
+            esac
+            setx destdir=abspath "$destbase/${repo#$prefix}"
+            if [ -d "$destdir" ]; then
+                if [ -n "$update" ]; then
+                    (
+                    ${no_clone:+qvals} cd "$destdir"
+                    ${no_clone:+qvals} git pull
+                    ) || edie || return
+                else
+                    estepe "$(ppath2 "$destdir"): répertoire existant"
+                fi
+            elif [ -n "$no_clone" ]; then
+                qvals git clone "$repourl" "$destdir"
+            else
+                git__pclone "$repourl" "$destdir" "$nodevelop" || edie || return
+            fi
+        done
+
+    else
+        repourl="${1%.git}"
+        [ -n "$repourl" ] || edie "Vous devez spécifier l'url du dépôt git" || return
+
+        destdir="$2"
+        if [ -z "$destdir" ]; then
+            splitfsep "$repourl" : userhost path
+            setx destdir=basename -- "$path"
+            destdir="${destdir%.git}"
+        fi
+        setx destdir=abspath "$destdir"
+
+        git_annex_use_ssh_wrapper
+        if [ -d "$destdir" ]; then
+            if [ -n "$update" ]; then
+                (
+                ${no_clone:+qvals} cd "$destdir"
+                ${no_clone:+qvals} git pull
+                ) || edie || return
+            else
+                estepe "$(ppath2 "$destdir"): répertoire existant"
+            fi
+        elif [ -n "$no_clone" ]; then
+            qvals git clone "$repourl" "$destdir"
+        else
+            git__pclone "$repourl" "$destdir" "$nodevelop" || edie || return
+        fi
+    fi
+}
+
+function: git_crone ""
+# "create & clone": ask the gitolite server to create repository $1 (http or
+# ssh URL), then clone it into $2 (default: the repository basename). When the
+# destination directory already exists and is not yet a repository, the clone
+# is made in a temporary directory and its .git is moved into place.
+# NOTE(review): like git_clone, the working variables are not declared local.
+function git_crone() {
+    repourl="${1%.git}"
+    [ -n "$repourl" ] || edie "Vous devez spécifier l'url du dépôt git" || return
+    if [ "${repourl#http://}" != "$repourl" -o "${repourl#https://}" != "$repourl" ]; then
+        # http access
+        mode=http
+        splitfsep "$repourl" :// scheme hostuserpath
+        splitfsep "$hostuserpath" / host userpath
+        splitfsep "$userpath" / user path
+        [ -n "$host" -a -n "$user" ] || edie "Vous devez spécifier l'hôte e.g http://host/git/repo" || return
+        hostuser="$scheme://$host/$user"
+    else
+        # ssh access
+        mode=ssh
+        splitfsep "$repourl" : userhost path
+        splituserhost "$userhost" user host
+        [ -n "$user" ] || user=git
+        [ -n "$host" ] || edie "Vous devez spécifier l'hôte" || return
+        userhost="$user@$host"
+    fi
+    [ -n "$path" ] || edie "Vous devez spécifier le chemin du dépôt git" || return
+
+    destdir="$2"
+    if [ -z "$destdir" ]; then
+        setx destdir=basename -- "$path"
+        destdir="${destdir%.git}"
+    fi
+    tmpdestdir=
+    if [ -d "$destdir" ]; then
+        [ -d "$destdir/.git" ] && edie "$(ppath2 "$destdir"): un dépôt existe déjà" || return
+        ac_set_tmpdir tmpdestdir
+    fi
+
+    if [ "$mode" == http ]; then
+        setx result=curl -fs "$hostuser/create?$path" || edie || return
+        echo "$result"
+        [[ "$result" == FATAL:* ]] && edie || return
+        if [ -n "$tmpdestdir" ]; then
+            setxx destname=abspath "$destdir" // basename
+            git clone "$hostuser/$path" "$tmpdestdir/$destname" || edie || return
+            mv "$tmpdestdir/$destname/.git" "$destdir" || edie || return
+            ac_clean "$tmpdestdir"
+        else
+            git clone "$hostuser/$path" "$destdir" || edie || return
+        fi
+    elif [ "$mode" == ssh ]; then
+        git_annex_use_ssh_wrapper
+        ssh "$userhost" create "$path" || edie || return
+        if [ -n "$tmpdestdir" ]; then
+            setxx destname=abspath "$destdir" // basename
+            git clone "$userhost:$path" "$tmpdestdir/$destname" || edie || return
+            mv "$tmpdestdir/$destname/.git" "$destdir" || edie || return
+            ac_clean "$tmpdestdir"
+        else
+            git clone "$userhost:$path" "$destdir" || edie || return
+        fi
+    else
+        edie "$mode: mode non supporté" || return
+    fi
+    git_annex_initial "$destdir" || edie || return
+}
diff --git a/lib/nulib/bash/nulib.sh b/lib/nulib/bash/nulib.sh
new file mode 120000
index 0000000..b22bb26
--- /dev/null
+++ b/lib/nulib/bash/nulib.sh
@@ -0,0 +1 @@
+../load.sh
\ No newline at end of file
diff --git a/lib/nulib/bash/pretty b/lib/nulib/bash/pretty
new file mode 100644
index 0000000..a46848f
--- /dev/null
+++ b/lib/nulib/bash/pretty
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: pretty base_ "Affichage en couleur"
+require: base
diff --git a/lib/nulib/bash/sysinfos b/lib/nulib/bash/sysinfos
new file mode 100644
index 0000000..2fa077a
--- /dev/null
+++ b/lib/nulib/bash/sysinfos
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+##@cooked nocomments
+module: sysinfos base_ "Informations sur le système courant"
+require: base
diff --git a/lib/nulib/bshell b/lib/nulib/bshell
new file mode 100755
index 0000000..9d4e2e2
--- /dev/null
+++ b/lib/nulib/bshell
@@ -0,0 +1,35 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+source "$(dirname -- "$0")/load.sh" || exit 1
+export NULIBDIR NULIBINIT
+
+# Launch an interactive subshell with nulib loaded: generate a temporary rcfile
+# that replays the usual profile files, tags the prompt with "[nulib-shell]"
+# and sources load.sh.
+# NOTE(review): inside the double-quoted template below, $DEFAULT_PS1 and $PS1
+# are expanded NOW (generation time), not when the rcfile runs -- the embedded
+# single quotes do not prevent expansion inside double quotes; TODO confirm
+# this is the intended behaviour.
+ac_set_tmpfile bashrc
+echo >"$bashrc" "\
+if ! grep -q '/etc/bash.bashrc' /etc/profile; then
+    [ -f /etc/bash.bashrc ] && source /etc/bash.bashrc
+fi
+if ! grep -q '~/.bashrc' ~/.bash_profile; then
+    [ -f ~/.bashrc ] && source ~/.bashrc
+fi
+[ -f /etc/profile ] && source /etc/profile
+[ -f ~/.bash_profile ] && source ~/.bash_profile
+
+# Modifier le PATH. Ajouter le chemin vers les scripts de support
+#PATH=$(qval "$MYDIR:$PATH")
+
+if [ -n '$DEFAULT_PS1' ]; then
+    DEFAULT_PS1=$(qval "[nulib-shell] $DEFAULT_PS1")
+else
+    if [ -z '$PS1' ]; then
+        PS1='\\u@\\h \\w \\$ '
+    fi
+    PS1=\"[nulib-shell] \$PS1\"
+fi
+$(qvals source "$MYDIR/load.sh")"
+
+"$SHELL" --rcfile "$bashrc" -i -- "$@"
+# note: do not exec "$SHELL", otherwise the temporary bashrc file below would
+# never be removed
+
+ac_clean "$bashrc"
+exit 0
diff --git a/lib/nulib/ddb-query_rtoinst b/lib/nulib/ddb-query_rtoinst
new file mode 100755
index 0000000..2604d45
--- /dev/null
+++ b/lib/nulib/ddb-query_rtoinst
@@ -0,0 +1,3 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+"$(dirname -- "$0")/deploydb" -m toinst --run -r toinst.query_rtoinst "$@"
diff --git a/lib/nulib/ddb-query_rwoinst b/lib/nulib/ddb-query_rwoinst
new file mode 100755
index 0000000..9eb1138
--- /dev/null
+++ b/lib/nulib/ddb-query_rwoinst
@@ -0,0 +1,3 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+"$(dirname -- "$0")/deploydb" -m woinst --run -r woinst.query_rwoinst "$@"
diff --git a/lib/nulib/ddb-query_rwyinst b/lib/nulib/ddb-query_rwyinst
new file mode 100755
index 0000000..e5bf429
--- /dev/null
+++ b/lib/nulib/ddb-query_rwyinst
@@ -0,0 +1,3 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+"$(dirname -- "$0")/deploydb" -m wyinst --run -r wyinst.query_rwyinst "$@"
diff --git a/lib/nulib/ddb-query_xuinst b/lib/nulib/ddb-query_xuinst
new file mode 100755
index 0000000..58037e9
--- /dev/null
+++ b/lib/nulib/ddb-query_xuinst
@@ -0,0 +1,3 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+"$(dirname -- "$0")/deploydb" -m uinst --run -r uinst.query_xuinst "$@"
diff --git a/lib/nulib/ddb-save_objects b/lib/nulib/ddb-save_objects
new file mode 100755
index 0000000..24778b7
--- /dev/null
+++ b/lib/nulib/ddb-save_objects
@@ -0,0 +1,3 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+"$(dirname -- "$0")/deploydb" --run -r base.save_objects "$@"
diff --git a/lib/nulib/deploydb b/lib/nulib/deploydb
new file mode 100755
index 0000000..9b9976b
--- /dev/null
+++ b/lib/nulib/deploydb
@@ -0,0 +1,12 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# Launcher for the deploydb python package: prepend ./python to PYTHONPATH and
+# run the module named after this script (overridable via PYTHON_MAIN_MODULE).
+MYNAME="$(basename -- "$0")"
+: "${PYTHON_MAIN_MODULE:=$MYNAME}"
+
+MYDIR="$(dirname -- "$0")"
+if [ -n "$PYTHONPATH" ]; then PYTHONPATH="$MYDIR/python:$PYTHONPATH"
+else PYTHONPATH="$MYDIR/python"
+fi
+export PYTHONPATH
+
+# NOTE(review): hardcodes python2.7; the project targets Python 2.6/2.7.
+exec python2.7 -m "$PYTHON_MAIN_MODULE" "$@"
diff --git a/lib/nulib/deploydb.conf b/lib/nulib/deploydb.conf
new file mode 100644
index 0000000..ea2032e
--- /dev/null
+++ b/lib/nulib/deploydb.conf
@@ -0,0 +1,3 @@
+# -*- coding: utf-8 mode: conf -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+host localhost host=localhost.
diff --git a/lib/nulib/doc/deploydb/index.md b/lib/nulib/doc/deploydb/index.md
new file mode 100644
index 0000000..bd48877
--- /dev/null
+++ b/lib/nulib/doc/deploydb/index.md
@@ -0,0 +1,259 @@
+`deploydb` est un moyen de décrire des informations de déploiement ou de
+configuration à propos de certains objets (hôtes, modules, webapps, woapps,
+etc.)
+
+# Syntaxe
+
+Le format du fichier de configuration est volontairement simple. Toute la
+logique est implémentée dans les clients qui accèdent à l'information stockée
+
+Le fichier contient deux sortes d'informations:
+* définition d'objet
+* définition de faits
+
+Les lignes vides ou commençant par '#' sont ignorées.
+Si une ligne commence par un espace, elle est fusionnée avec la ligne
+précédente.
+
+## Définition d'objet
+
+Une définition d'objet a le format suivant:
+~~~
+otype oid[=values] [ATTRS...] [LINKS...]
+~~~
+
+`otype`
+: type d'objet à créer ou à mettre à jour
+
+`oid`
+: identifiant de l'objet à créer ou à mettre à jour
+
+`values`
+: valeurs de l'objet, séparées par des virgules
+
+`ATTR`
+: attribut de l'objet
+
+`LINK`
+: définition d'objet lié
+
+Une définition d'attribut a l'un des formats suivants:
+~~~
+name[=value]
+name+[=value]
+name-[=value]
+name%[=value]
+~~~
+
+Les attributs sont multivalués, et par défaut, on rajoute la nouvelle valeur aux
+valeurs existantes sauf si elle existe déjà dans l'attribut.
+
+value vaut par défaut 'true', ce qui n'est pas la même chose qu'une valeur
+vide. comparer les deux définitions suivantes:
+~~~
+first # first vaut 'true'
+second= # second vaut ''
+~~~
+
+Les types de mise à jour valides sont:
+* `=` ajout de la valeur si l'attribut ne la contient pas déjà
+* `+=` ajout inconditionnel d'une valeur à l'attribut
+* `-=` suppression d'une valeur de l'attribut
+* `%=` remettre à zéro l'attribut d'abord
+
+Ainsi, les définitions suivantes sont équivalentes deux à deux:
+~~~
+attr=value attr=value # le doublon est supprimé
+attr=value # c'est comme si on ne spécifie la valeur qu'une seule fois
+
+attr=value attr%=first attr=second
+attr=value attr-=value attr=first attr=second
+~~~
+
+Une définition de lien a le format suivant:
+~~~
+-otype oids... [ATTRS...]
+~~~
+
+`otype`
+: type de l'objet lié
+
+`oids`
+: liste d'identifiants d'objets liés séparés par des virgules
+
+`ATTR`
+: attribut à rajouter à la définition du lien
+
+Voici un exemple complet:
+~~~
+humain bob nom=Payet prenom=Robert
+ age=42
+ desc="un humain qui a un père, une mère et deux voitures"
+ -humain eric type=pere
+ -humain martine type=mere
+ -vehicule titine,bumblebee
+humain eric nom=Payet prenom=Eric
+humain martine nom=Payet prenom="Martine Joséphine"
+vehicule titine marque=Citroen immatriculation=BX-467-PM
+vehicule bumblebee marque=Camaro type=autobot
+~~~
+
+## Définition de faits
+
+Un fait est l'affirmation d'un lien d'action ou d'état entre deux objets,
+décrit par un verbe. Par exemple, pour décrire le fait que bob mange une
+tarte, on écrirait:
+~~~
+humain bob
+aliment tarte
+
+-humain bob
+ mange -aliment tarte
+~~~
+
+Une définition de fait a le format suivant:
+~~~
+-sotype soids... [DEFATTRS...]
+ verb -totype toids... [FACTATTRS...]
+ ...
+~~~
+
+`sotype`
+`totype`
+: types d'objets source et cible
+
+`soid`
+`toid`
+: identifiants des objets source et cible
+
+`verb`
+: identifiant du lien entre la source et la destination. en général, il s'agit
+ d'un verbe d'action ou d'état conjugué à la 3ème personne du singulier.
+
+ si le verbe commence par `~` alors la définition est inversée. par exemple,
+ les deux faits suivants sont rigoureusement équivalents:
+ ~~~
+ -otype src verb -otype dest
+ -otype dest ~verb -otype src
+ ~~~
+ cela permet de supporter les cas où la définition inverse est plus facile.
+
+`DEFATTR`
+: attribut pour tous les faits définis dans cette déclaration
+
+`FACTATTR`
+: attribut spécifique au fait défini
+
+# deploydb
+
+Le script `deploydb` permet d'interroger la base de données ou de lancer une
+fonction pour traiter le contenu de la base de données
+
+Dans ce document, `DEPLOYDBDIR` désigne le répertoire du script `deploydb`
+
+Options
+`-c, --config CONFIG`
+: spécifier un fichier de configuration à charger. la valeur par défaut est
+ `deploydb.conf`
+
+ si le fichier de configuration n'est pas spécifié ou est spécifié sans chemin,
+ `deploydb:path` est initialisé avec la valeur par défaut suivante:
+ ~~~
+ ~/etc/deploydb:/var/local/deploydb:DEPLOYDBDIR
+ ~~~
+
+ sinon, `deploydb:path` est initialisé avec le répertoire de CONFIG
+
+`-m, --module MODULE`
+: spécifier un module supplémentaire à charger. le module python effectivement
+ cherché dans le path et chargé est `MODULE_module`. La liste par défaut des
+ modules à charger contient un seul élément, `base`, ce qui signifie que le
+ module `base_module` est chargé. Les modules permettent de définir de la
+ logique pour les objets, ou l'outil à lancer.
+
+`-r, --func FUNC`
+: spécifier le nom de la fonction à lancer après le chargement des modules et
+ des fichiers de configuration. La valeur par défaut est `base.query`, qui
+ interroge la base de données et affiche son contenu.
+
+ La fonction est appelée avec les arguments de la ligne de commande, sans les
+ options, qui sont traitées en amont.
+
+`--dump`
+: afficher le contenu complet de la base de données, pour débugger. ignorer
+ l'option `-r`
+
+# Module base
+
+Le module `base` chargé par défaut définit
+* les objets de type `deploydb`, `host`
+* la fonction `query()`
+
+## deploydb
+
+En créant des instances de cet objet avec des identifiants normalisés, il est
+possible de modifier la configuration.
+
+`deploydb path dir=CONFDIR`
+: définir les répertoires de recherche pour les fichiers de configuration
+ spécifiés sans chemin. dans ce document, cette valeur est appelée
+ `deploydb:path`
+
+`deploydb include file=CONFIG`
+: définir des fichiers de configuration supplémentaire à charger. Si les
+ fichiers sont spécifiés sans chemin, il sont cherchés dans `deploydb:path`
+
+`deploydb loadcsv file=CSVFILE`
+: charger des définitions d'objets depuis des fichiers CSV. Si les fichiers sont
+ spécifiés sans chemin, ils sont cherchés dans `deploydb:path`
+
+ L'attribut `otype_col` qui vaut par défaut `otype` permet de définir la
+ colonne qui contient le type d'objet. L'attribut `otype` permet de spécifier
+ le type d'objet si la colonne n'existe pas dans le fichier.
+
+ L'attribut `oid_col` qui vaut par défaut `oid` permet de définir la colonne
+ qui contient l'identifiant d'objet à créer.
+
+ Toutes les autres colonnes du fichier sont utilisées pour définir les
+ attributs des objets.
+
+## host
+
+Cet objet définit un hôte vers lequel on peut par exemple déployer un artifact.
+
+`host * domain=DOMAIN.TLD`
+: définir un domaine par défaut pour tous les hôtes spécifiés sans domaine
+
+Les attributs suivants sont supportés:
+
+`host`
+: nom d'hôte. le domaine par défaut est ajouté le cas échéant. pour ne pas
+ rajouter un domaine, spécifier le nom avec un point final e.g `localhost.`
+
+`hostname`
+: nom d'hôte sans le domaine
+
+`domain`
+: domaine sans le nom d'hôte
+
+`ip`
+: adresse IP de l'hôte
+
+Si seul `host` est spécifié, `hostname` et `domain` sont calculés en fonction de
+sa valeur.
+
+Si seul `hostname` est spécifié, `host` est calculé en fonction de sa valeur et
+de celle de `domain`
+
+Si `ip` n'est pas spécifié, une résolution DNS est effectuée pour déterminer
+l'adresse de `host`
+
+Si l'objet est défini sans valeurs, alors la valeur finale est la liste des hôtes.
+
+## base.query()
+
+Interroger la base de données
+
+XXX déterminer le format des requêtes
+
+-*- coding: utf-8 mode: markdown -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8:noeol:binary
\ No newline at end of file
diff --git a/lib/nulib/doc/index.md b/lib/nulib/doc/index.md
new file mode 100644
index 0000000..a478e7e
--- /dev/null
+++ b/lib/nulib/doc/index.md
@@ -0,0 +1,21 @@
+# nulib
+
+nulib est une librairie de fonctions shell et python, ainsi qu'un
+ensemble d'utilitaires basés sur ces librairies
+
+## Prérequis
+
+nulib est conçu pour tourner sur des versions récentes de Linux et
+requiert bash 4.1+, GNUawk 3.1+ et Python 2.6
+
+Les systèmes cibles sont Debian 8+ (jessie, stretch) et Oracle Linux 6+
+
+| Système | bash | GNUawk | Python |
+|----------------|------|--------|--------|
+| RHEL6, OL6 | 4.1 | 3.1 | 2.6 |
+| RHEL7, OL7 | 4.2 | 4.0 | 2.7 |
+| Debian 8 | 4.3 | 4.1 | 2.7 |
+| Debian 9 | 4.4 | 4.1 | 2.7 |
+
+
+-*- coding: utf-8 mode: markdown -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8:noeol:binary
\ No newline at end of file
diff --git a/lib/nulib/load.sh b/lib/nulib/load.sh
new file mode 100644
index 0000000..b6ad3c1
--- /dev/null
+++ b/lib/nulib/load.sh
@@ -0,0 +1,176 @@
+##@cooked comments # -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+## Charger nulib et rendre disponible les modules bash, awk et python
+##@cooked nocomments
+# Ce fichier doit être sourcé en premier. Si ce fichier n'est pas sourcé, alors
+# le répertoire nulib doit être disponible dans le répertoire du script qui
+# inclut ce fichier.
+# Une fois ce fichier sourcé, les autres modules peuvent être importés avec
+# require:() ou import:() e.g.
+# source /etc/nulib.sh || exit 1
+# import: other_modules
+# ou pour une copie locale de nulib:
+# source "$(dirname "$0")/nulib/load.sh" || exit 1
+# import: other_modules
+
+# vérifier version minimum de bash
+if [ "x$BASH" = "x" ]; then
+ echo "ERROR: nulib: this script requires bash"
+ exit 1
+fi
+
+# Print an error message on stderr, prefixed with "ERROR: ".
+function base_eerror() { echo "ERROR: $*" 1>&2; }
+# Print an optional error message, then terminate the shell with status 1.
+function base_die() { [ $# -gt 0 ] && base_eerror "$*"; exit 1; }
+# Print an optional error message, then return 1 (non-fatal variant of base_die).
+function base_edie() { [ $# -gt 0 ] && base_eerror "$*"; return 1; }
+# Remove directory $1 from the ':'-separated path variable named $2 (default
+# PATH).  The eval splices the target variable name into the code; _qdir is $1
+# with '/' escaped so it can be used inside the ${var//pattern/} replacement.
+function base_delpath() { local _qdir="${1//\//\\/}"; eval "export ${2:-PATH}; ${2:-PATH}"'="${'"${2:-PATH}"'#$1:}"; '"${2:-PATH}"'="${'"${2:-PATH}"'%:$1}"; '"${2:-PATH}"'="${'"${2:-PATH}"'//:$_qdir:/:}"; [ "$'"${2:-PATH}"'" == "$1" ] && '"${2:-PATH}"'='; }
+# Append directory $1 to the path variable named $2 (default PATH) unless it
+# already appears anywhere in the list (head, tail, middle, or sole entry).
+function base_addpath() { local _qdir="${1//\//\\/}"; eval "export ${2:-PATH}; "'[ "${'"${2:-PATH}"'#$1:}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'%:$1}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'//:$_qdir:/:}" == "$'"${2:-PATH}"'" -a "$'"${2:-PATH}"'" != "$1" ] && '"${2:-PATH}"'="${'"${2:-PATH}"':+$'"${2:-PATH}"':}$1"'; }
+# Prepend directory $1 to the path variable named $2 (default PATH) unless it
+# already appears anywhere in the list.
+function base_inspathm() { local _qdir="${1//\//\\/}"; eval "export ${2:-PATH}; "'[ "${'"${2:-PATH}"'#$1:}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'%:$1}" == "$'"${2:-PATH}"'" -a "${'"${2:-PATH}"'//:$_qdir:/:}" == "$'"${2:-PATH}"'" -a "$'"${2:-PATH}"'" != "$1" ] && '"${2:-PATH}"'="$1${'"${2:-PATH}"':+:$'"${2:-PATH}"'}"'; }
+# Move directory $1 to the front of the path variable named $2: delete any
+# existing occurrence, then prepend.
+function base_inspath() { base_delpath "$@"; base_inspathm "$@"; }
+
+if [ ${BASH_VERSINFO[0]} -ge 5 -o \( ${BASH_VERSINFO[0]} -eq 4 -a ${BASH_VERSINFO[1]} -ge 1 \) ]; then :
+elif [ -n "$NULIB_IGNORE_BASH_VERSION" ]; then :
+else base_die "nulib: bash 4.1+ is required"
+fi
+
+# Calculer emplacement de nulib
+NULIBDIR="@@dest@@/lib/nulib"
+if [ "$NULIBDIR" = "@@""dest""@@/lib/nulib" ]; then
+ # La valeur "@@"dest"@@" n'est remplacée que dans la copie de ce script
+ # faite dans /etc. Sinon, il faut toujours faire le calcul. Cela permet de
+ # déplacer la librairie n'importe ou sur le disque, ce qui est
+ # particulièrement intéressant quand on fait du déploiement.
+ NULIBDIR="${BASH_SOURCE[0]}"
+ if [ -f "$NULIBDIR" -a "$(basename -- "$NULIBDIR")" == load.sh ]; then
+ # Fichier sourcé depuis nulib/
+ NULIB_SOURCED=1
+ NULIBDIR="$(dirname -- "$NULIBDIR")"
+ elif [ -f "$NULIBDIR" -a "$(basename -- "$NULIBDIR")" == nulib.sh ]; then
+ # Fichier sourcé depuis nulib/bash
+ NULIB_SOURCED=1
+ NULIBDIR="$(dirname -- "$NULIBDIR")/.."
+ else
+ # Fichier non sourcé. Tout exprimer par rapport au script courant
+ NULIB_SOURCED=
+ NULIBDIR="$(dirname -- "$0")"
+ if [ -d "$NULIBDIR/nulib" ]; then
+ NULIBDIR="$NULIBDIR/nulib"
+ elif [ -d "$NULIBDIR/lib/nulib" ]; then
+ NULIBDIR="$NULIBDIR/lib/nulib"
+ fi
+ fi
+fi
+NULIBDIR="$(cd "$NULIBDIR" 2>/dev/null; pwd)"
+NULIBDIRS=("$NULIBDIR/bash")
+
+# marqueur pour vérifier que nulib a réellement été chargé. il faut avoir $NULIBINIT == $NULIBDIR
+# utilisé par le module base qui doit pouvoir être inclus indépendamment
+NULIBINIT="$NULIBDIR"
+
+## Modules bash
+NULIB_LOADED_MODULES=(nulib.sh)
+NULIB_DEFAULT_MODULES=(base pretty sysinfos)
+
+# Si cette variable est non vide, require: recharge toujours le module, même
+# s'il a déjà été chargé. Cette valeur n'est pas transitive: il faut toujours
+# recharger explicitement tous les modules désirés
+NULIB_FORCE_RELOAD=
+
+# Define (or redefine) the helper functions that module files call while
+# being sourced: nulib_check_loaded, module: and function:.
+function nulib__define_functions() {
+    # Return 0 if the module named $1 has already been loaded.
+    function nulib_check_loaded() {
+        local module
+        for module in "${NULIB_LOADED_MODULES[@]}"; do
+            [ "$module" == "$1" ] && return 0
+        done
+        return 1
+    }
+    # Declare the current module: $1 is the module name, $2 the function
+    # prefix used by function: below.  Register the module as loaded.
+    function module:() {
+        NULIB_MODULE="$1"
+        NULIB_FUNC_PREFIX="$2"
+        if ! nulib_check_loaded "$1"; then
+            NULIB_LOADED_MODULES=("${NULIB_LOADED_MODULES[@]}" "$1")
+        fi
+    }
+    # When importing (NULIB_ALLOW_IMPORT set) and $1 starts with the module's
+    # function prefix, define an unprefixed alias that delegates to $1.
+    function function:() {
+        if [ -n "$NULIB_ALLOW_IMPORT" -a -n "$NULIB_FUNC_PREFIX" -a "${1#$NULIB_FUNC_PREFIX}" != "$1" ]; then
+            eval "function ${1#$NULIB_FUNC_PREFIX}() { $1 \"\$@\"; }"
+        fi
+    }
+}
+
+# Load the modules named as arguments from the directories in NULIBDIRS.
+# With no argument, load the DEFAULTS module set.  Dies if a module cannot
+# be found.  Modules already loaded are skipped unless NULIB_FORCE_RELOAD
+# was set by the caller.
+function nulib__require:() {
+    local nr__module nr__nulibdir nr__found
+    [ $# -gt 0 ] || set DEFAULTS
+
+    # save global values
+    local nr__orig_module="$NULIB_MODULE" nr__orig_func_prefix="$NULIB_FUNC_PREFIX"
+    NULIB_MODULE=
+    NULIB_FUNC_PREFIX=
+
+    # keep a copy of the original value and break transitivity
+    local nr__force_reload="$NULIB_FORCE_RELOAD"
+    local NULIB_FORCE_RELOAD
+
+    local nr__should_import="$NULIB_SHOULD_IMPORT" nr__allow_import="$NULIB_ALLOW_IMPORT" nr__recursive_import="$NULIB_RECURSIVE_IMPORT"
+    for nr__module in "$@"; do
+        local NULIB_SHOULD_IMPORT="$nr__should_import" NULIB_ALLOW_IMPORT="$nr__allow_import" NULIB_RECURSIVE_IMPORT="$nr__recursive_import"
+        [ -n "$NULIB_SHOULD_IMPORT" ] && NULIB_ALLOW_IMPORT=1
+        nr__found=
+        for nr__nulibdir in "${NULIBDIRS[@]}"; do
+            if [ -f "$nr__nulibdir/$nr__module" ]; then
+                nr__found=1
+                if [ -n "$nr__force_reload" ] || ! nulib_check_loaded "$nr__module"; then
+                    NULIB_LOADED_MODULES=("${NULIB_LOADED_MODULES[@]}" "$nr__module")
+                    source "$nr__nulibdir/$nr__module" || base_die
+                fi
+                break
+            fi
+        done
+        # pseudo-module DEFAULTS: load every module of the default set
+        # NOTE(review): this branch reuses nr__nulibdir left over from the
+        # last iteration of the loop above -- only correct when NULIBDIRS has
+        # a single entry (currently the case); confirm before extending.
+        if [ -z "$nr__found" -a "$nr__module" == DEFAULTS ]; then
+            for nr__module in "${NULIB_DEFAULT_MODULES[@]}"; do
+                if [ -f "$nr__nulibdir/$nr__module" ]; then
+                    nr__found=1
+                    if [ -n "$nr__force_reload" ] || ! nulib_check_loaded "$nr__module"; then
+                        NULIB_LOADED_MODULES=("${NULIB_LOADED_MODULES[@]}" "$nr__module")
+                        source "$nr__nulibdir/$nr__module" || base_die
+                    fi
+                else
+                    break
+                fi
+            done
+        fi
+        [ -n "$nr__found" ] || base_die "nulib: unable to find module $nr__module in (${NULIBDIRS[*]})"
+    done
+
+    # restore global values
+    NULIB_MODULE="$nr__orig_module"
+    NULIB_FUNC_PREFIX="$nr__orig_func_prefix"
+}
+
+# Public entry point: load modules without importing the unprefixed function
+# aliases (unless a recursive import is already in progress).
+function require:() {
+    [ -z "$NULIB_NO_DISABLE_SET_X" ] && [[ $- == *x* ]] && { set +x; local NULIB_REQUIRE_SET_X=1; }; if [ -n "$NULIB_REQUIRE_SET_X" ]; then [ -n "$NULIB_REQUIRE_SET_X_RL1" ] || local NULIB_REQUIRE_SET_X_RL1; local NULIB_REQUIRE_SET_X_RL2=$RANDOM; [ -n "$NULIB_REQUIRE_SET_X_RL1" ] || NULIB_REQUIRE_SET_X_RL1=$NULIB_REQUIRE_SET_X_RL2; fi # disable set -x in a re-entrant way
+    local NULIB_SHOULD_IMPORT
+    [ -n "$NULIB_RECURSIVE_IMPORT" -a -n "$NULIB_ALLOW_IMPORT" ] && NULIB_SHOULD_IMPORT=1
+    local NULIB_ALLOW_IMPORT NULIB_RECURSIVE_IMPORT NULIB_FUNC_PREFIX
+    nulib__define_functions
+    nulib__require: "$@"
+    # restore set -x only in the outermost call that disabled it
+    [ -n "$NULIB_REQUIRE_SET_X" -a "$NULIB_REQUIRE_SET_X_RL1" == "$NULIB_REQUIRE_SET_X_RL2" ] && set -x
+    return 0
+}
+
+# Public entry point: load modules AND import their functions without the
+# module prefix (NULIB_SHOULD_IMPORT is forced on).
+function import:() {
+    [ -z "$NULIB_NO_DISABLE_SET_X" ] && [[ $- == *x* ]] && { set +x; local NULIB_REQUIRE_SET_X=1; }; if [ -n "$NULIB_REQUIRE_SET_X" ]; then [ -n "$NULIB_REQUIRE_SET_X_RL1" ] || local NULIB_REQUIRE_SET_X_RL1; local NULIB_REQUIRE_SET_X_RL2=$RANDOM; [ -n "$NULIB_REQUIRE_SET_X_RL1" ] || NULIB_REQUIRE_SET_X_RL1=$NULIB_REQUIRE_SET_X_RL2; fi # disable set -x in a re-entrant way
+    local NULIB_SHOULD_IMPORT=1 NULIB_ALLOW_IMPORT NULIB_RECURSIVE_IMPORT NULIB_FUNC_PREFIX
+    nulib__define_functions
+    nulib__require: "$@"
+    # restore set -x only in the outermost call that disabled it
+    [ -n "$NULIB_REQUIRE_SET_X" -a "$NULIB_REQUIRE_SET_X_RL1" == "$NULIB_REQUIRE_SET_X_RL2" ] && set -x
+    return 0
+}
+
+## Autres modules
+base_inspath "$NULIBDIR/awk" AWKPATH; export AWKPATH
+base_inspath "$NULIBDIR/python" PYTHONPATH; export PYTHONPATH
+
+## Auto import DEFAULTS
+nulib__define_functions
+if [ -n "$NULIB_SOURCED" -a -z "$NULIB_NO_IMPORT_DEFAULTS" ]; then
+ import: DEFAULTS
+fi
diff --git a/lib/nulib/nulib_config.py b/lib/nulib/nulib_config.py
new file mode 100644
index 0000000..47c2751
--- /dev/null
+++ b/lib/nulib/nulib_config.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# fichier d'exemple pour la configuration de nulib, à placer quelque part dans
+# PYTHONPATH
+
+"""Configuration de nulib.
+
+Ce module contient des variables qui servent à configurer le comportement de
+nulib.
+"""
+
+# Liste des modules de base qui sont importés automatiquement avec
+# from nulib_py import *
+#MODULES = ()
+
+# L'importation de nulib.base.encoding provoque-t-elle la configuration de la
+# locale courante?
+#SET_LOCALE = True
+
+# Encoding par défaut, s'il est impossible de le détecter autrement.
+#DEFAULT_INPUT_ENCODING = 'utf-8'
+#DEFAULT_OUTPUT_ENCODING = 'utf-8'
+
+# Faut-il supprimer le répertoire courant de sys.path?
+#CLEAN_SYSPATH = True
diff --git a/lib/nulib/pshell b/lib/nulib/pshell
new file mode 100755
index 0000000..36d7b49
--- /dev/null
+++ b/lib/nulib/pshell
@@ -0,0 +1,13 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+scriptdir="$(dirname -- "$0")"
+if [ -z "$NULIBDIR" -o "$NULIBDIR" != "$NULIBINIT" ]; then
+    # load nulib if that has not been done yet
+    source "$scriptdir/load.sh"
+fi
+
+DEFAULT_PYTHON=python2.7
+
+# start an interactive Python shell pre-initialized with pshell.py
+echo ">>> Shell Python pour nulib"
+exec "${PYTHON:-$DEFAULT_PYTHON}" -i -c "$(<"$scriptdir/pshell.py")"
diff --git a/lib/nulib/pshell.py b/lib/nulib/pshell.py
new file mode 100644
index 0000000..3938e2b
--- /dev/null
+++ b/lib/nulib/pshell.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# initialisation pour pshell
+
+import sys, os
+from os import path
+import pdb
+
+from nulib import *
+from nulib.web import ui
+from nulib.web import bootstrap as bs
diff --git a/lib/nulib/python/.gitignore b/lib/nulib/python/.gitignore
new file mode 100644
index 0000000..b9b295a
--- /dev/null
+++ b/lib/nulib/python/.gitignore
@@ -0,0 +1,3 @@
+/build/
+*~
+*.py[co]
diff --git a/lib/nulib/python/deploydb/__init__.py b/lib/nulib/python/deploydb/__init__.py
new file mode 100644
index 0000000..9d853e8
--- /dev/null
+++ b/lib/nulib/python/deploydb/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
diff --git a/lib/nulib/python/deploydb/__main__.py b/lib/nulib/python/deploydb/__main__.py
new file mode 100644
index 0000000..50bd39e
--- /dev/null
+++ b/lib/nulib/python/deploydb/__main__.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Interroger la base deploydb
+"""
+
+import logging; log = logging.getLogger(__name__)
+import sys
+from os import path
+from argparse import ArgumentParser
+
+from .utils import *
+from .parser import Parser
+from .objects import catalog
+
+DEFAULT_CONFIG = 'deploydb.conf'
+USER_CONFDIR = path.expanduser('~/etc/deploy')
+SYSTEM_CONFDIR = '/var/local/deploy'
+
+DEFAULT_MODULES = ['base']
+DEFAULT_FUNC = 'base.query'
+DEFAULT_ACTION = 'run'
+
+SCRIPTDIR = path.dirname(path.dirname(path.dirname(__file__)))
+
+################################################################################
+# Programme principal
+
+logging.basicConfig()
+
+from argparse import ArgumentParser, HelpFormatter
+if sys.argv[1:2] == ['--compat']:
+ # Avec l'argument --compat, désactiver la classe FancyHelpFormatter qui
+ # se base sur une API non documentée
+ sys.argv = sys.argv[0:1] + sys.argv[2:]
+ FancyHelpFormatter = HelpFormatter
+else:
+    class FancyHelpFormatter(HelpFormatter):
+        """Like HelpFormatter, but leave lines starting with the characters
+        '>>>' untouched.  This allows mixing formatted and unformatted text.
+
+        NOTE(review): overrides the undocumented HelpFormatter internals
+        _fill_text/_split_lines, hence the --compat escape hatch above.
+        """
+        def _fill_text(self, text, width, indent):
+            # keep the author's line breaks instead of re-wrapping paragraphs
+            return ''.join([indent + line for line in text.splitlines(True)])
+        def _split_lines(self, text, width):
+            # group consecutive lines; every '>>>' line becomes its own group
+            lines = ['']
+            for line in text.splitlines():
+                if line.startswith('>>>'):
+                    lines.append(line)
+                    lines.append('')
+                else:
+                    lines[-1] += '\n' + line
+            lines = filter(None, lines)
+            texts = []
+            for line in lines:
+                if line.startswith('>>>'):
+                    # verbatim group: emit as-is, minus the '>>>' marker
+                    line = line[3:]
+                    if line: texts.append(line)
+                else:
+                    # normal text: let HelpFormatter wrap it
+                    texts.extend(super(FancyHelpFormatter, self)._split_lines(line, width))
+            return texts
+AP = ArgumentParser(
+ usage=u"deploydb args...",
+ description=__doc__,
+ formatter_class=FancyHelpFormatter,
+)
+AP.set_defaults(
+ missing_ok=False,
+ modules=DEFAULT_MODULES,
+ func=DEFAULT_FUNC,
+ resolve=True,
+ action=DEFAULT_ACTION,
+)
+AP.add_argument('-c', '--config', dest='config',
+ help=u"Spécifier le fichier de configuration à utiliser. S'il s'agit d'un nom sans chemin, il est recherché dans les répertoires par défaut.")
+AP.add_argument('--missing-ok', action='store_true', dest='missing_ok',
+ help=u"Sortir sans erreur si le fichier de configuration n'est pas trouvé")
+AP.add_argument('-m', '--module', action='append', dest='modules', metavar='MODULE',
+ help=u"Spécifier un module à charger. Cette option peut être spécifiée autant de fois que nécessaire. Par défaut, seul le module 'base' est chargé.")
+AP.add_argument('-r', '--func', dest='func',
+ help=u"Spécifier la fonction à lancer après le chargement de la base de données. La valeur par défaut est %s" % DEFAULT_FUNC)
+AP.add_argument('--no-resolve', action='store_false', dest='resolve',
+ help=u"Ne pas résoudre les objets avant de lancer la fonction. Cette option avancée ne devrait pas avoir besoin d'être utilisée.")
+AP.add_argument('--run', action='store_const', dest='action', const='run',
+ help=u"Lancer la fonction spécifiée avec l'option --func")
+AP.add_argument('--dump', action='store_const', dest='action', const='dump',
+ help=u"Afficher le contenu de la base de données")
+AP.add_argument('args', nargs='*')
+o = AP.parse_args()
+
+# charger les modules
+MODULES = {}
+for module in o.modules:
+ MODULES[module] = __import__('%s_module' % module, globals())
+
+# charger la configuration
+config = o.config
+if config is not None and ('/' in config or path.isfile(config)):
+ deploydb_path = [path.abspath(path.dirname(config))]
+else:
+ deploydb_path = [USER_CONFDIR, SYSTEM_CONFDIR, SCRIPTDIR]
+ cname = config if config is not None else DEFAULT_CONFIG
+ config = find_in_path(cname, deploydb_path)
+ if config is None and not o.missing_ok:
+ raise ValueError("Impossible de trouver le fichier de configuration %s" % cname)
+
+catalog.create_object('deploydb', 'path', dir=deploydb_path)
+Parser(config)
+
+dd_path = catalog.get('deploydb', 'path')
+dd_include = catalog.get('deploydb', 'include', None, create=False)
+if dd_include is not None:
+    # Process deploydb:include directives.  Parsing an included file may add
+    # new 'file' entries or new search directories, so iterate until a full
+    # pass includes nothing new.
+    included = set([config])
+    while True:
+        done = True
+        for file in dd_include.get('file', ()):
+            # this value may change as files get included; reload it
+            # systematically
+            deploydb_path = dd_path.get('dir', ())
+            pf = find_in_path(file, deploydb_path)
+            # NOTE(review): when the file is not found, pf is None and None
+            # gets added to 'included', so the warning below fires at most
+            # once per run -- presumably intentional; confirm.
+            if pf in included: continue
+            included.add(pf)
+            if pf is not None:
+                Parser(pf)
+                # there may be new files to include; schedule another
+                # iteration
+                done = False
+            else:
+                log.warning("deploydb:include: %s: Fichier introuvable", file)
+        if done: break
+
+# reload the search path: the include phase above may have extended it
+deploydb_path = dd_path.get('dir', ())
+dd_loadcsv = catalog.get('deploydb', 'loadcsv', None, create=False)
+if dd_loadcsv is not None:
+    # deploydb:loadcsv -- load object definitions from CSV files.  otype_col
+    # and oid_col name the columns holding the object type and id; otype is
+    # the fallback type when the column is absent (see the documentation).
+    otype = dd_loadcsv.first('otype', None)
+    otype_col = dd_loadcsv.first('otype_col', 'otype')
+    oid_col = dd_loadcsv.first('oid_col', 'oid')
+    for file in dd_loadcsv.get('file', ()):
+        pf = find_in_path(file, deploydb_path)
+        if pf is not None:
+            catalog.load_csv(pf, otype, otype_col, oid_col)
+        else:
+            log.warning("deploydb:loadcsv: %s: Fichier introuvable", file)
+
+# actions
+if o.resolve: catalog.resolve()
+
+if o.action == 'run':
+    # resolve the dotted name 'module.attr...' into a callable, then invoke
+    # it with the remaining positional arguments
+    names = o.func.split('.')
+    func = MODULES[names[0]]
+    for name in names[1:]:
+        func = getattr(func, name)
+    func(*o.args)
+
+elif o.action == 'dump':
+    catalog.dump()
diff --git a/lib/nulib/python/deploydb/base_module.py b/lib/nulib/python/deploydb/base_module.py
new file mode 100644
index 0000000..b63c86d
--- /dev/null
+++ b/lib/nulib/python/deploydb/base_module.py
@@ -0,0 +1,239 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'Deploydb',
+ 'Host',
+ 'host_matcher', 'hostname_matcher',
+)
+
+import logging; log = logging.getLogger(__name__)
+import os, sys, socket, csv
+from os import path
+
+from .utils import *
+from .objects import XT, fileP, pathP, lowerP, Object, catalog
+
+################################################################################
+# Configuration de deploydb
+
+class Deploydb(Object):
+    # Settings for deploydb itself ('deploydb path', 'deploydb include',
+    # 'deploydb loadcsv' in the configuration).  'dir' and 'file' are parsed
+    # with pathP/fileP -- presumably path-normalizing parsers; confirm in
+    # objects.py.
+    ATTRS = XT(Object, dir=pathP, file=fileP)
+
+################################################################################
+# Gestion des groupes
+
+class Group(Object):
+    """Group of linked objects.
+
+    During resolve, every variable defined on the group is propagated to the
+    linked objects when it is not already defined on those objects.
+
+    In the following example:
+    ~~~
+    group mymodules shared=all
+    -module module1,module2
+      moduledir=~/wop/php
+    -host host1,host2
+      domain=long.tld
+    ~~~
+    the variable shared is initialized for module1,module2,host1,host2
+    whereas the variable moduledir only concerns module1,module2 and the
+    variable domain only concerns host1,host2
+    """
+
+    # presumably makes the catalog resolve groups before other objects so the
+    # propagated defaults are in place -- confirm in objects.py
+    __RESOLVE_FIRST__ = True
+
+    ATTRS = XT(Object)
+
+    def _resolve(self, catalog):
+        # push link-level then group-level variables onto each linked object;
+        # set_defaults does not override values the object already defines
+        for otype, links in self.links.items():
+            for link in links:
+                object = link.resolve(catalog, resolve=False)
+                object.set_defaults(link.attrs)
+                object.set_defaults(self.attrs)
+
+################################################################################
+# Gestion des hôtes
+
+# True if h contains a domain part (i.e. at least one dot).
+def withdomain(h): return '.' in h
+
+def fix_host(host, domain=None):
+    # Normalize a host name: a trailing dot means "do not qualify" and is
+    # stripped; otherwise append the domain when the name is unqualified.
+    if host.endswith('.'):
+        host = host[:-1]
+    elif domain and not withdomain(host):
+        host = "%s.%s" % (host, domain)
+    return host
+# Return the part after the first dot, or None when there is no dot.
+def strip_hostname(a):
+    pos = a.find('.')
+    if pos == -1: return None
+    else: return a[pos + 1:]
+# Return the part before the first dot (the whole string when no dot).
+def strip_domain(a):
+    pos = a.find('.')
+    if pos == -1: return a
+    else: return a[:pos]
+
+def match_host(qhost, object):
+    # True if the queried name designates this Host object
+    qhost = lowerP.parse(qhost)
+    if withdomain(qhost): # fully qualified host name
+        qhost = fix_host(qhost)
+        return qhost in object.get('host', ())
+    else: # bare host name
+        return qhost in object.get('hostname', ())
+def host_matcher(qhost):
+    # predicate factory, for use in query expressions
+    return lambda object: match_host(qhost, object)
+
+def match_hostname(qhost, object):
+    # compare on the bare name only; path.basename strips any leading path
+    qhost = lowerP.parse(qhost)
+    qhost = path.basename(qhost)
+    return qhost in object.get('hostname', ())
+def hostname_matcher(qhost):
+    # predicate factory, for use in query expressions
+    return lambda object: match_hostname(qhost, object)
+
+class Host(Object):
+    # A deployment target host.  'host' is the qualified name, 'hostname'
+    # the bare name, 'domain' the domain part, 'ip' the resolved addresses.
+    # All values except ip are normalized by the lowerP parser.
+    ATTRS = XT(Object,
+               values=lowerP,
+               host=lowerP, hostname=lowerP, domain=lowerP, ip=None)
+
+    def _resolve(self, catalog):
+        # Derive host/hostname/domain/ip from whichever attributes were
+        # provided, following the rules described in the documentation.
+        if self.oid == '*': return
+        # the special 'host *' object provides defaults (e.g. the domain)
+        default = catalog.get(self.otype, '*', None, False)
+
+        hosts = self.get('host', [])
+        hostnames = self.get('hostname', ())
+        domains = self.get('domain', ())
+
+        # hosts may also be harvested from the filesystem: basenames of
+        # entries matching basedir/dirspec/filespec
+        search_basedir = self.get('search_basedir', ('dirs',))
+        files = 'files' in search_basedir
+        dirs = 'dirs' in search_basedir
+        basedir = self.get('basedir', None)
+        if basedir is not None:
+            hostdirs = self.resolve_basedir(basedir, files=files, dirs=dirs)
+            hosts.extend(map(path.basename, hostdirs))
+        dirspec = self.get('dirspec', None)
+        if dirspec is not None:
+            hostdirs = self.resolve_filespec(dirspec, dirs=True)
+            hosts.extend(map(path.basename, hostdirs))
+        filespec = self.get('filespec', None)
+        if filespec is not None:
+            hostfiles = self.resolve_filespec(filespec, files=True)
+            hosts.extend(map(path.basename, hostfiles))
+
+        if hosts:
+            # generate hostname and domain from host
+            if not domains:
+                domains = set(map(strip_hostname, hosts))
+                domains = filter(lambda d: d is not None, domains)
+                if not domains and default is not None:
+                    domains = default.get('domain', ())
+                domains = filter(None, domains)
+            if domains: domains = self.domain = set(domains)
+
+            hostnames = map(strip_domain, hostnames or hosts)
+            if hostnames: hostnames = self.hostname = set(hostnames)
+
+            if domains:
+                # qualify every host with every domain
+                tmphosts = []
+                for host in hosts:
+                    for domain in domains:
+                        tmphosts.append(fix_host(host, domain))
+            else:
+                tmphosts = map(fix_host, hosts)
+            hosts = self.host = set(tmphosts)
+
+        else:
+            # generate host from hostname and domain
+            if not domains and default is not None:
+                domains = default.get('domain', ())
+            if domains: domains = self.domain = set(domains)
+
+            if not hostnames: hostnames = [self.oid]
+            hostnames = map(strip_domain, hostnames)
+            if hostnames: self.hostname = hostnames
+
+            if domains:
+                # cartesian product hostname x domain
+                hosts = []
+                for domain in domains:
+                    for hostname in hostnames:
+                        hosts.append('%s.%s' % (hostname, domain))
+            else:
+                hosts = hostnames
+            if hosts: hosts = self.host = set(hosts)
+
+        ips = self.get('ip', [])
+        if not ips:
+            # no explicit ip given: resolve each host through DNS
+            for host in hosts:
+                try:
+                    hostnames, aliases, ipaddrs = socket.gethostbyname_ex(host)
+                    ips.extend(ipaddrs)
+                except socket.herror, e:
+                    log.error("error resolving %s: %s, %s", host, e[0], e[1])
+                except socket.gaierror, e:
+                    log.error("error resolving %s: %s, %s", host, e[0], e[1])
+        if ips: ips = self.ip = set(ips)
+
+        if not self.values:
+            # with no explicit values, the object's value is the host list
+            self.values = hosts
+
+def save_hosts(*args):
+    """Write the defined hosts as a csv list that can be fed back with
+    'deploydb loadcsv'.
+
+    More or less equivalent to `save_objects host`, but the fields are in a
+    more ergonomic order (this function was written first; it is kept for
+    historical reasons).
+    """
+    # first, collect every attribute name that will be needed
+    headers = ['host', 'hostname', 'domain', 'ip']
+    hosts = catalog.find_objects('host')
+    for host in hosts:
+        for name in host.attrs.keys():
+            if name not in headers: headers.append(name)
+    # then build the rows (the '*' defaults object is skipped)
+    rows = []
+    for host in hosts:
+        if host.oid == '*': continue
+        row = [host.otype, host.oid]
+        for name in headers:
+            row.append(';'.join(host.get(name, ())))
+        rows.append(row)
+    headers[0:0] = ['otype', 'oid']
+    # write the result
+    out = csv.writer(sys.stdout)
+    out.writerow(headers)
+    out.writerows(rows)
+
+################################################################################
+# Actions
+
+def save_objects(*args):
+    """Write objects as a csv list that can be fed back with
+    'deploydb loadcsv'
+
+    usage: save_objects [otype [oids...]]
+    """
+    otypes = listof(args[0] if args[0:1] else None, None)
+    if otypes is not None: otypes = flattenstr(otypes)
+    oids = args[1:] or None
+    objects = catalog.find_objects(otypes, oids, create=False)
+    # first, collect every attribute name that will be needed: declared
+    # read-write attributes first, then ad-hoc ones
+    headers = ['otype', 'oid']
+    for object in objects:
+        for name in object.known_rw_attrs:
+            if name not in headers: headers.append(name)
+    for object in objects:
+        for name in object.misc_attrs:
+            if name not in headers: headers.append(name)
+    # then build the rows
+    # NOTE(review): unlike save_hosts, otype/oid are fetched via object.get()
+    # like any other column -- assumes Object.get exposes them; confirm
+    rows = []
+    for object in objects:
+        row = []
+        for name in headers:
+            row.append(';'.join(object.get(name, ())))
+        rows.append(row)
+    # write the result
+    out = csv.writer(sys.stdout)
+    out.writerow(headers)
+    out.writerows(rows)
+
+def query(*args):
+    # placeholder: the query format is not determined yet (see base.query()
+    # in the documentation); currently a no-op
+    pass
diff --git a/lib/nulib/python/deploydb/expr.py b/lib/nulib/python/deploydb/expr.py
new file mode 100644
index 0000000..45c5005
--- /dev/null
+++ b/lib/nulib/python/deploydb/expr.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'ANY', 'ALL', 'NONE', 'EXISTS',
+)
+
+from .utils import *
+
+class Expr(object):
+    """Base class for query expressions matched against catalog objects."""
+    # terms given at construction (None when none); exposed read-only
+    _terms, terms = None, property(lambda self: self._terms)
+
+    def __init__(self, *terms):
+        self._terms = listof(terms or None, None)
+
+    @staticmethod
+    def match_dict(dict, object):
+        # every attribute named in the dict must carry at least one of the
+        # given values on the object
+        for name, value in dict.items():
+            one_match = False
+            attr_values = object.get(name, ())
+            # NOTE(review): the loop variable shadows 'value' (listof(value)
+            # is evaluated before the rebinding) -- works, but fragile
+            for value in listof(value):
+                if value in attr_values:
+                    one_match = True
+                    break
+            if not one_match: return False
+        return True
+
+    @staticmethod
+    def match_term(term, object):
+        """Test the term against the object.
+        * if it is None, return true
+        * if it is a dictionary, all the named attributes must have at least
+          one of the provided values
+        * if it is a function (more exactly a callable object), it must take
+          the single argument (object) and return True when the object
+          matches
+        * if it is a list, treat it as ANY(*term)
+        * if it is an instance of Expr, delegate to its match() method
+        * otherwise, raise an exception.
+        """
+        if term is None:
+            return True
+        elif isinstance(term, dict):
+            return Expr.match_dict(term, object)
+        elif callable(term):
+            return term(object)
+        elif isseq(term):
+            term = ANY(*term)
+        if isinstance(term, Expr):
+            return term.match(object)
+        else:
+            raise ValueError("Argument invalide %r" % term)
+
+    def match(self, object):
+        # base implementation matches nothing; subclasses override
+        return False
+
+class ANY(Expr):
+    """Build the object with a list of terms. At least one of the terms must
+    match.
+    """
+    def match(self, object, lazy=True):
+        # lazy: stop evaluating terms as soon as one matches
+        result = False
+        if self.terms is None: return result
+        for term in self.terms:
+            if self.match_term(term, object):
+                result = True
+                if lazy: break
+        return result
+
+class ALL(Expr):
+    """Build the object with a list of terms. All the terms must match."""
+    def match(self, object, lazy=True):
+        # lazy: stop evaluating terms as soon as one fails
+        result = True
+        if self.terms is None: return result
+        for term in self.terms:
+            if not self.match_term(term, object):
+                result = False
+                if lazy: break
+        return result
+
+class NONE(Expr):
+    """Build the object with a list of terms. None of the terms may match."""
+    # NOTE(review): unlike ANY/ALL, lazy defaults to False here, so by
+    # default every term is evaluated even after a match -- confirm whether
+    # this asymmetry is intentional.
+    def match(self, object, lazy=False):
+        result = True
+        if self.terms is None: return result
+        for term in self.terms:
+            if self.match_term(term, object):
+                result = False
+                if lazy: break
+        return result
+
+class EXISTS(Expr):
+    """Build the object with a list of attribute names. All the attributes
+    must exist on the object.
+    """
+    def match(self, object, lazy=True):
+        result = True
+        if self.terms is None: return result
+        for term in self.terms:
+            # has_key is Python 2 only; object is dict-like here
+            if not object.has_key(term):
+                result = False
+                if lazy: break
+        return result
diff --git a/lib/nulib/python/deploydb/lexer.py b/lib/nulib/python/deploydb/lexer.py
new file mode 100644
index 0000000..e8fcc49
--- /dev/null
+++ b/lib/nulib/python/deploydb/lexer.py
@@ -0,0 +1,180 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Lexer pour un fichier de configuration
+
+Syntaxe:
+~~~
+# comment
+object id var=value
+ continuation="line starting with a space"
+ -link otherid
+
+-link objectref1 predicate -link objectref2
+~~~
+"""
+
+__all__ = (
+ 'Lexer',
+)
+
+import re
+
class EOL(object):
    """Sentinel lexem: end of line."""
    # Fix: '__string__' is not a Python special method (it was a dead
    # attribute); the intent was clearly __str__.  str() happened to work
    # anyway by falling back to __repr__, so behavior is unchanged.
    __repr__ = __str__ = lambda self: 'EOL'
EOL = EOL()
class CONTL(object):
    """Sentinel lexem: continuation line."""
    # Fix: '__string__' is not a Python special method (it was a dead
    # attribute); the intent was clearly __str__.  str() happened to work
    # anyway by falling back to __repr__, so behavior is unchanged.
    __repr__ = __str__ = lambda self: 'CONTL'
CONTL = CONTL()
class EOF(object):
    """Sentinel lexem: end of file."""
    # Fix: '__string__' is not a Python special method (it was a dead
    # attribute); the intent was clearly __str__.  str() happened to work
    # anyway by falling back to __repr__, so behavior is unchanged.
    __repr__ = __str__ = lambda self: 'EOF'
EOF = EOF()
+
+class Lexer(object):
+ file = None
+ lexems = None
+ _inf = None
+ _lcount = None
+ _line = None
+
    def __init__(self, file, parse=True):
        """Wrap a file to tokenize.

        file: presumably a path or open file-like object consumed via
              self._inf.readline() -- TODO confirm against parse(), which
              is defined further down in the file, outside this excerpt
        parse: when True (the default), tokenize immediately by calling
               self.parse()
        """
        self.file = file
        if parse: self.parse()
+
+ def next_line(self):
+ line = self._inf.readline()
+ if line == '': return None
+ if line.endswith("\r\n"): line = line[:-2]
+ elif line.endswith("\n"): line = line[:-1]
+ elif line.endswith("\r"): line = line[:-1]
+ self._lcount += 1
+ self._line = line
+ return line
+
    # Predicates on the current-line buffer (_line).
    # NOTE(review): this isa_comment tests only the first character; it is
    # silently shadowed by an equivalent regex-based redefinition further
    # down the class body -- one of the two should be removed.
    def is_empty(self): return self._line == ''
    def isa_comment(self): return self._line[:1] == '#'
    def isa_squote(self): return self._line[:1] == "'"
    def isa_dquote(self): return self._line[:1] == '"'
+
    RE_SPACE = re.compile(r'\s+')
    RE_COMMENT = re.compile(r'#.*')
    def parse_ws(self):
        """Consume leading whitespace and an immediately following comment
        on the current line.

        Returns True when the line is a continuation line, i.e. it starts
        with whitespace and what follows is not just a comment.
        """
        # it is a continuation line if it starts with whitespace and does
        # not immediately run into a comment
        contl = False
        mo = self.RE_SPACE.match(self._line)
        if mo is not None:
            self._line = self._line[mo.end(0):]
            contl = True
        mo = self.RE_COMMENT.match(self._line)
        if mo is not None:
            self._line = self._line[mo.end(0):]
            contl = False
        return contl
    def isa_space(self): return self.RE_SPACE.match(self._line) is not None
    # NOTE(review): this regex-based isa_comment shadows the first-character
    # version defined earlier in the class; both behave identically since
    # RE_COMMENT is anchored by match().
    def isa_comment(self): return self.RE_COMMENT.match(self._line) is not None
+
    RE_SQUOTE = re.compile(r"'")
    def parse_sstring(self):
        """Parse a single-quoted string starting at the current position.

        Assumes the opening quote is the first character of _line.  The
        string may span several physical lines; a "\n" is inserted for
        each line break.  No escape sequences are recognized.  Raises
        ValueError if EOF is reached before the closing quote; the message
        reports the line where the string started.
        """
        slos = self._lcount  # start line of string, for the error message
        lexem = ''
        self._line = self._line[1:]
        mo = self.RE_SQUOTE.search(self._line)
        while mo is None:
            lexem += self._line
            if self.next_line() is None:
                raise ValueError("unterminated quoted string starting at line %i" % slos)
            lexem += "\n"
            mo = self.RE_SQUOTE.search(self._line)
        lexem += self._line[0:mo.start(0)]
        self._line = self._line[mo.end(0):]
        return lexem
+
+ RE_DQUOTE = re.compile(r'"')
+ def parse_dstring(self):
+ slos = self._lcount
+ lexem = ''
+ self._line = self._line[1:]
+ mo = self.RE_DQUOTE.search(self._line)
+ while mo is None:
+ lexem += self._line
+ if self.next_line() is None:
+ raise ValueError("unterminated double-quoted string starting at line %i" % slos)
+ lexem += "\n"
+ mo = self.RE_DQUOTE.search(self._line)
+ lexem += self._line[0:mo.start(0)]
+ self._line = self._line[mo.end(0):]
+ lexem = lexem.replace('\\"', '"')
+ lexem = lexem.replace("\\'", "'")
+ lexem = lexem.replace('\\\\', '\\')
+ return lexem
+
+ RE_EOS = re.compile(r'''\s|(? oclass
+ _t2cmap, t2cmap = None, property(lambda self: self._t2cmap)
+ # map otype --> id2object
+ _t2iomap, t2iomap = None, property(lambda self: self._t2iomap)
+
+ # liste d'instance de facts
+ _facts, facts = None, property(lambda self: self._facts)
+
+ # types d'objets qui doivent être résolus en premier
+ _rf_otypes = None
+
+ def __init__(self):
+ self._t2cmap = {}
+ self._t2iomap = {}
+ self._facts = []
+ self._rf_otypes = set()
+
+ def register(self, oclass, otype=None, resolve_first=False):
+ if otype is None:
+ otype = oclass.__name__.lower()
+ self._t2cmap[otype] = oclass
+ self._rf_otypes.add(otype)
+
+ ############################################################################
+ # Création
+
    def create_object(self, otype, oid, *values, **attrs):
        """Create a new object of the given type, or update the existing one.

        otype may be a class (its TYPE attribute gives the type name) or a
        type name looked up in the registered-class map; a type with no
        registered class falls back to the generic Object.  When an object
        with this type/id already exists, its values and attributes are
        extended instead of a new object being created.  Returns the
        object.
        """
        if isinstance(otype, type):
            oclass = otype
            otype = otype.TYPE
        else:
            oclass = self._t2cmap.get(otype, None)
        if not self._t2iomap.has_key(otype):
            self._t2iomap[otype] = {}
        i2omap = self._t2iomap[otype]
        if not i2omap.has_key(oid):
            if oclass is None:
                object = Object(oid, *values, **attrs)
                object.otype = otype
            else:
                object = oclass(oid, *values, **attrs)
            i2omap[oid] = object
        else:
            object = i2omap[oid]
            if values: object.update('values', values)
            if attrs: object.update(attrs)
        return object
+
+ def create_fact(self, sotype, soid, verb, totype, toid, **attrs):
+ """créer un nouveau fait entre les deux objets spécifiés
+ """
+ if isinstance(sotype, type): sotype = sotype.TYPE
+ if isinstance(totype, type): totype = totype.TYPE
+ fact = Fact(sotype, soid, verb, totype, toid, **attrs)
+ self._facts.append(fact)
+ return fact
+
    def resolve(self):
        """Resolve every object and then every fact in the catalog.

        Object types registered with resolve_first=True are resolved
        before all other types; facts are resolved last.  Returns self so
        calls can be chained.
        """
        rf_otypes = self._rf_otypes
        # first resolve the object types listed in rf_otypes
        for otype in rf_otypes:
            i2omap = self.t2iomap.get(otype, None)
            if i2omap is None: continue
            for id, object in i2omap.items():
                object.resolve(self)
        # then resolve the remaining object types
        for otype, i2omap in self.t2iomap.items():
            if otype in rf_otypes: continue
            for id, object in i2omap.items():
                object.resolve(self)
        # finally resolve all the facts
        for fact in self.facts:
            fact.resolve(self)
        return self
+
+ ############################################################################
+ # Consultation
+
    def get(self, otype, oid, default=_RAISE_EXCEPTION, create=True, resolve=True):
        """Fetch an object by type and id.

        By default the object is created if it does not exist; with
        create=True the default argument is ignored.

        With create=False, default is the value returned for a missing
        object; a ValueError is raised when default is _RAISE_EXCEPTION
        (which is the default value).

        With resolve=True the object is resolved before being returned.
        """
        object = None
        i2omap = self._t2iomap.get(otype, None)
        if i2omap is not None: object = i2omap.get(oid, None)
        if object is None and not create:
            if default is _RAISE_EXCEPTION:
                raise ValueError("%s:%s: not found" % (otype, oid))
            else:
                return default
        if object is None:
            object = self.create_object(otype, oid)
        if resolve:
            object.resolve(self)
        return object
+
+ ############################################################################
+ # Recherches
+
+ def find_tobjects(self, totype, objects, create=True, resolve=True):
+ """trouver les objets liés de type totype dans la objects
+ """
+ objects = listof(objects)
+ if totype is not None:
+ # mettre dans un dictionnaire et indexer sur oid pour éviter les
+ # doublons
+ tobjects = {}
+ for object in objects:
+ if object.otype == totype:
+ tobjects[object.oid] = object
+ else:
+ lobjects = [link.resolve(self, None, create, resolve) for link in object.get_links(totype)]
+ for lobject in lobjects:
+ if lobject is None: continue
+ tobjects[lobject.oid] = lobject
+ objects = tobjects.values()
+ return objects
+
+ def filter_objects(self, expr, objects):
+ """ne garder dans la liste objects que les objets qui correspondent à
+ l'expression.
+ """
+ objects = listof(objects)
+ return [object for object in objects if Expr.match_term(expr, object)]
+
+ def find_objects(self, otype=None, oid=None,
+ totype=None, expr=None,
+ create=True, resolve=True):
+ """chercher les objets correspondant à otype et/ou oid
+
+ si totype!=None, alors chercher les objets liés qui sont de ce type
+ """
+ otypes = listof(otype, None)
+ oids = listof(oid, None)
+ if otypes is not None and oids is not None:
+ objects = []
+ for otype in otypes:
+ i2omap = self.t2iomap.get(otype, {})
+ objects.extend([object for object in i2omap.values() if object.oid in oids])
+ elif otypes is not None and oids is None:
+ objects = []
+ for otype in otypes:
+ i2omap = self.t2iomap.get(otype, {})
+ objects.extend(i2omap.values())
+ elif oids is not None and otypes is None:
+ objects = []
+ for otype, i2omap in self.t2iomap.items():
+ objects.extend([object for object in i2omap.values() if object.oid in oids])
+ else:
+ objects = []
+ for otype, i2omap in self.t2iomap.items():
+ objects.extend(i2omap.values())
+ if resolve:
+ map(lambda object: object.resolve(self), objects)
+ objects = self.find_tobjects(totype, objects, create, resolve)
+ if expr is not None:
+ objects = self.filter_objects(expr, objects)
+ return objects
+
+ def filter_facts(self, expr, facts):
+ """ne garder dans la liste facts que les faits qui correspondent à l'expression
+ """
+ facts = listof(facts)
+ return [(fact, tsobjects, ttobjects)
+ for (fact, tsobjects, ttobjects) in facts
+ if Expr.match_term(expr, fact)]
+
+ def find_facts(self, sotype=None, soid=None, verb=None, totype=None, toid=None,
+ tsotype=None, tsexpr=None,
+ ttotype=None, ttexpr=None,
+ expr=None,
+ resolve=True):
+ """chercher les faits correspondant aux arguments
+
+ retourner une liste de tuples (fact, tsobjects, ttobjects) où
+ * fact est le fait original
+ * tsobjects sont les objets sources liés si tsotype et tsexpr sont
+ spécifiés
+ * ttobjects sont les objets destination liés si ttotype et ttexpr sont
+ spécifiés
+ """
+ sotypes = listof(sotype, None)
+ soids = listof(soid, None)
+ verbs = listof(verb, None)
+ totypes = listof(totype, None)
+ toids = listof(toid, None)
+ facts = []
+ for fact in self.facts:
+ if sotypes is not None and fact.sotype not in sotypes:
+ continue
+ if soids is not None and fact.soid not in soids:
+ continue
+ if verbs is not None and fact.verb not in verbs:
+ continue
+ if totypes is not None and fact.totype not in totypes:
+ continue
+ if toids is not None and fact.toid not in toids:
+ continue
+ tsobjects = [fact.sresolve(self, None, True)]
+ ttobjects = [fact.tresolve(self, None, True)]
+ if tsotype is not None:
+ # chercher les objets liés dans la source
+ tsobjects = self.filter_objects(tsexpr, self.find_tobjects(tsotype, tsobjects))
+ if not tsobjects: continue
+ if ttotype is not None:
+ # chercher les objets liés dans la source
+ ttobjects = self.filter_objects(ttexpr, self.find_tobjects(ttotype, ttobjects))
+ if not ttobjects: continue
+ facts.append((fact, tsobjects, ttobjects))
+ if resolve:
+ for fact, tsobjects, ttobjects in facts:
+ fact.resolve(self)
+ if expr is not None:
+ facts = self.filter_facts(expr, facts)
+ return facts
+
+ ############################################################################
+ # Divers
+
+ def dump(self):
+ self.resolve()
+ for otype, i2omap in self.t2iomap.items():
+ print "OBJECTS:%s:" % otype
+ for id, object in i2omap.items():
+ object.dump(" ")
+ if self.facts:
+ print "FACTS:"
+ for fact in self.facts:
+ fact.dump(" ")
+
+################################################################################
+# liens
+
+class Link(object):
+ """Un lien vers une référence d'un objet
+
+ Un lien a le type de l'objet cible (propriété `otype`), son identifiant
+ (propriété `oid`), et des attributs multivalués (toutes les autres
+ propriétés)
+ """
+
+ ATTRS = dict(otype=None, oid=None, attrs=None)
+
+ _rw_attrs = set(('otype', 'oid'))
+ _ro_attrs = set(('attrs',))
+ _reserved_attrs = _rw_attrs | _ro_attrs
+
+ _otype = None
+ _oid = None
+ _attrs = None
+
+ def __init__(self, otype=None, oid=None, **attrs):
+ self.__dict__['_otype'] = otype
+ self.__dict__['_oid'] = oid
+ self.__dict__['_attrs'] = {}
+ for attr, value in attrs.items():
+ self.update(attr, value)
+
+ def __parse(self, attr, value):
+ """obtenir le parser qui permet de s'assurer que value est dans le bon
+ format pour l'attribut attr.
+ """
+ if isindex(attr): parser = None
+ else: parser = self.ATTRS.get(attr, None)
+ if parser is None: return value
+ elif isseq(value): return flattenseq(map(parser.parse, value))
+ else: return parser.parse(value)
+
+ # accès aux attributs
+ def __getattr__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ try:
+ return self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ def __setattr__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr in self._rw_attrs:
+ return super(Link, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise AttributeError(attr)
+ else:
+ self._attrs[attr] = listof(value)
+ def __delattr__(self, attr):
+ if attr in self._reserved_attrs:
+ raise AttributeError(attr)
+ try:
+ del self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ def __getitem__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ else:
+ return self._attrs[attr]
+ def __setitem__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr in self._rw_attrs:
+ return super(Link, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise KeyError(attr)
+ else:
+ self._attrs[attr] = listof(value)
+ def __delitem__(self, attr):
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ else:
+ del self._attrs[attr]
+
+ def first(self, attr, default=None):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ if self._attrs.has_key(attr):
+ values = self._attrs[attr]
+ if values: return values[0]
+ return default
+ def get(self, attr, default=None):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ else:
+ return self._attrs.get(attr, default)
+ def has_key(self, attr):
+ """tester l'existence d'un attribut"""
+ if attr in self._reserved_attrs:
+ return True
+ else:
+ return self._attrs.has_key(attr)
+ @property
+ def known_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma"""
+ return sorted(list(self.ATTRS.keys()))
+ @property
+ def misc_attrs(self):
+ """obtenir une liste triée d'attributs ne faisant pas partie du schéma"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(defined_attrs - schema_attrs))
+ @property
+ def missing_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma mais non définis"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(schema_attrs - defined_attrs - self._reserved_attrs))
+ def update(self, attr, value=None, update_type=ADD_UNIQUE):
+ """mettre à jour l'attribut spécifié
+
+ si l'attribut n'existe pas, il est créé. sinon, la liste des valeurs de
+ l'attribut est étendue.
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés.
+
+ update_type est la méthode de mise à jour
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.update(attr, value, update_type)
+ return self
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ if value is not None:
+ values = listof(self.__parse(attr, value))
+ if not self._attrs.has_key(attr):
+ self._attrs[attr] = []
+ attr = self._attrs[attr]
+ if update_type is ADD_UNIQUE:
+ for value in values:
+ if value not in attr:
+ attr.append(value)
+ elif update_type is ADD:
+ attr.extend(values)
+ elif update_type is REMOVE:
+ for value in values:
+ if value in attr:
+ attr.remove(value)
+ elif update_type is RESET_ADD:
+ attr[:] = values
+ return self
+ def set_defaults(self, attr, value=None, update_type=ADD_UNIQUE):
+ """Mettre à jour l'attribut spécifié s'il n'existe pas
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés s'ils n'existent pas.
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.set_defaults(attr, value, update_type)
+ return self
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ if not self._attrs.has_key(attr):
+ self.update(attr, value, update_type)
+ return self
+
+ def clone(self):
+ """cloner ce lien"""
+ return self.__class__(self._otype, self._oid, **self._attrs)
+
+ # catalogue
+ def resolve(self, catalog, default=_RAISE_EXCEPTION, create=True, resolve=True):
+ """obtenir l'objet lié
+ """
+ return catalog.get(self.otype, self.oid, default, create, resolve)
+
+ # divers
+ def _dump_idtype(self, indent, prefix=None):
+ if prefix is None: prefix = ''
+ else: prefix = "%s " % prefix
+ print "%s%s%s:%s" % (indent, prefix, self._otype, self._oid)
+ def _dump_attrs(self, indent):
+ attrs = self._attrs
+ missing_attrs = self.missing_attrs
+ if attrs or missing_attrs:
+ print "%s attrs:" % indent
+ for name, values in attrs.items():
+ if len(values) == 1:
+ print "%s %s=%s" % (indent, name, repr(values[0]))
+ else:
+ print "%s %s=(%s)" % (indent, name, ', '.join(map(repr, values)))
+ for name in missing_attrs:
+ print "%s %s=" % (indent, name)
+ def dump(self, indent='', prefix=None):
+ """Afficher l'identifiant, le type et les attributs de ce lien
+ """
+ self._dump_idtype(indent, prefix)
+ self._dump_attrs(indent)
+
+################################################################################
+# objets
+
class MetaObject(type):
    """Metaclass for Object subclasses.

    Derives TYPE from the lowercased class name when the class does not
    set it itself, and auto-registers the class in the module-level
    catalog.  Registration is skipped when the class sets
    __NO_AUTO_REGISTER__; __RESOLVE_FIRST__ is forwarded so the type can
    be resolved before the others (see register()).
    """
    def __init__(cls, name, bases, attrs):
        type.__init__(cls, name, bases, attrs)
        # only honor flags set directly on this class, not inherited ones
        if cls.__dict__.get('TYPE', None) is None:
            cls.TYPE = cls.__name__.lower()
        register = not cls.__dict__.get('__NO_AUTO_REGISTER__', False)
        resolve_first = cls.__dict__.get('__RESOLVE_FIRST__', False)
        if register:
            catalog.register(cls, cls.TYPE, resolve_first)
+
+class Object(object):
+ """Un objet générique
+
+ Un objet a un identifiant (propriété `oid`), un type (propriété `otype`), une
+ liste de valeurs (propriété `values`), des liens vers d'autres objets
+ (propriété `links`) et des attributs multivalués (toutes les autres propriétés).
+
+ Le type de l'objet définit un schéma, c'est à dire un ensemble d'attributs
+ spécifiques avec des valeurs par défaut. Les attributs du schéma sont les
+ attributs connus (propriété known_attrs), les autres sont les attributs
+ divers (propriété misc_attrs)
+ """
+
+ __metaclass__ = MetaObject
+ __NO_AUTO_REGISTER__ = True
+ __RESOLVE_FIRST__ = False
+
+ ATTRS = dict(otype=None, oid=None, values=None, attrs=None, links=None)
+ TYPE = 'object'
+
+ _rw_attrs = set(('otype', 'oid'))
+ _ro_attrs = set(('values', 'attrs', 'links'))
+ _reserved_attrs = _rw_attrs | _ro_attrs
+
+ _otype = None
+ _oid = None
+ _values = None
+ _attrs = None
+ _links = None
+
+ _resolved = None
+
+ def __init__(self, oid=None, *values, **attrs):
+ self.__dict__['_otype'] = self.TYPE
+ self.__dict__['_oid'] = oid
+ self.__dict__['_values'] = []
+ self.__dict__['_attrs'] = {}
+ self.__dict__['_links'] = {}
+ self.__dict__['_resolved'] = False
+ self.update('values', values)
+ for attr, value in attrs.items():
+ self.update(attr, value)
+
+ def __parse(self, attr, value):
+ """obtenir le parser qui permet de s'assurer que value est dans le bon
+ format pour l'attribut attr. Utiliser attr==None pour l'attribut values
+ """
+ if isindex(attr): attr = 'values'
+ parser = self.ATTRS.get(attr, None)
+ if parser is None: return value
+ elif isseq(value): return flattenseq(map(parser.parse, value))
+ else: return parser.parse(value)
+
+ # accès aux valeurs (via un index numérique) et aux attributs (via le nom de
+ # l'attribut)
+ def __getattr__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ try:
+ if isindex(attr): return self._values[attr]
+ else: return self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ def __setattr__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr == 'values':
+ self._values[:] = listof(value)
+ elif attr in self._rw_attrs:
+ super(Object, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise AttributeError(attr)
+ elif attr in self.__dict__:
+ super(Object, self).__setattr__(attr, value)
+ elif isindex(attr):
+ self._values[attr] = value
+ else:
+ self._attrs[attr] = listof(value)
+ self.__dict__['_resolved'] = False
+ def __delattr__(self, attr):
+ if attr in self._reserved_attrs:
+ raise AttributeError(attr)
+ try:
+ if isindex(attr): del self._values[attr]
+ else: del self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ self.__dict__['_resolved'] = False
+ def __getitem__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ elif isindex(attr):
+ return self._values[attr]
+ else:
+ return self._attrs[attr]
+ def __setitem__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr == 'values':
+ self._values[:] = listof(value)
+ elif attr in self._rw_attrs:
+ return super(Object, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise KeyError(attr)
+ elif isindex(attr):
+ self._values[attr] = value
+ else:
+ self._attrs[attr] = listof(value)
+ self.__dict__['_resolved'] = False
+ def __delitem__(self, attr):
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ elif isindex(attr):
+ del self._values[attr]
+ else:
+ del self._attrs[attr]
+ self.__dict__['_resolved'] = False
+
+ # accès spécifique aux valeurs
+ __nonzero__ = lambda self: True
+ def __len__(self):
+ """obtenir le nombre de valeurs"""
+ return len(self._values)
+ def __iter__(self):
+ """obtenir un itérateur sur les valeurs"""
+ return self._values.__iter__()
+ def __reversed__(self):
+ """obtenir la liste des valeurs inversée"""
+ return self._values.__reversed__()
+ def __contains__(self, item):
+ """tester l'existence d'une valeur"""
+ return item in self._values
+ def append(self, value):
+ """ajouter une valeur"""
+ return self._values.append(value)
+ def insert(self, index, value):
+ """insérer une valeur à la position spécifiée"""
+ return self._values.insert(index, value)
+ def extend(self, seq):
+ """étendre la liste des valeurs"""
+ return self._values.extend(seq)
+
+ # accès spécifique aux attributs
+ def first(self, attr, default=None):
+ """obtenir la première valeur de l'attribut"""
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ if self._attrs.has_key(attr):
+ values = self._attrs[attr]
+ if values: return values[0]
+ return default
+ def get(self, attr, default=None):
+ """obtenir l'attribut sous forme de liste"""
+ if attr in self._reserved_attrs:
+ return listof(getattr(self, '_%s' % attr))
+ else:
+ return self._attrs.get(attr, default)
+ def has_key(self, attr):
+ """tester l'existence d'un attribut"""
+ if attr in self._reserved_attrs:
+ return True
+ else:
+ return self._attrs.has_key(attr)
+ @property
+ def known_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma"""
+ return sorted(list(self.ATTRS.keys()))
+ @property
+ def known_rw_attrs(self):
+ """obtenir une liste triée des attributs faisant partie du schéma accessibles en écriture"""
+ return sorted(list(set(self.ATTRS.keys()) - self._ro_attrs))
+ @property
+ def misc_attrs(self):
+ """obtenir une liste triée d'attributs ne faisant pas partie du schéma"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(defined_attrs - schema_attrs))
+ @property
+ def missing_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma mais non définis"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(schema_attrs - defined_attrs - self._reserved_attrs))
+ def update(self, attr, value=None, update_type=ADD_UNIQUE):
+ """mettre à jour l'attribut spécifié
+
+ si l'attribut n'existe pas, il est créé. sinon, la liste des valeurs de
+ l'attribut est étendue.
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés.
+
+ update_type est la méthode de mise à jour
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.update(attr, value, update_type)
+ return self
+ if attr == 'values': pass
+ elif attr in self._reserved_attrs:
+ raise KeyError(attr)
+ if value is not None:
+ values = listof(self.__parse(attr, value))
+ if attr == 'values':
+ attr = self._values
+ else:
+ if not self._attrs.has_key(attr): self._attrs[attr] = []
+ attr = self._attrs[attr]
+ if update_type is ADD_UNIQUE:
+ for value in values:
+ if value not in attr:
+ attr.append(value)
+ elif update_type is ADD:
+ attr.extend(values)
+ elif update_type is REMOVE:
+ for value in values:
+ if value in attr:
+ attr.remove(value)
+ elif update_type is RESET_ADD:
+ attr[:] = values
+ self.__dict__['_resolved'] = False
+ return self
+ def set_defaults(self, attr, value=None, update_type=ADD_UNIQUE):
+ """Mettre à jour l'attribut spécifié s'il n'existe pas
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ utiliser attr==None pour mettre à jour l'attribut values
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés.
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.set_defaults(attr, value, update_type)
+ return self
+ if attr == 'values':
+ if not self._values:
+ self.update('values', value, update_type)
+ elif attr in self._reserved_attrs:
+ raise KeyError(attr)
+ elif not self._attrs.has_key(attr):
+ self.update(attr, value, update_type)
+ return self
+
+ def clone(self):
+ """cloner cet objet"""
+ o = self.__class__(self._oid, self._values, **self._attrs)
+ # XXX cloner aussi les liens
+ return o
+
+ # gestion des liens
+ def linkto(self, loi, otype=None, **attrs):
+ """lier vers une référence d'un autre objet
+
+ loi peut être:
+ * une instance de Link
+ * une instance d'Object
+ * un identifiant d'objet, auquel cas otype est requis
+
+ @return l'instance du lien créé
+ """
+ if isinstance(loi, Link):
+ create = False
+ link = loi.clone()
+ elif isinstance(loi, Object):
+ otype = loi.otype
+ oid = loi.oid
+ create = True
+ else:
+ if otype is None: raise ValueError('otype is required')
+ oid = loi
+ create = True
+ if create:
+ link = Link(otype, oid, **attrs)
+ else:
+ link.update(attrs)
+ links = self._links
+ if not links.has_key(link.otype):
+ links[otype] = []
+ links[otype].append(link)
+ self.__dict__['_resolved'] = False
+ return link
+
+ def get_links(self, otype=None, clone=False):
+ """retourner les liens vers les objets du type spécifié
+
+ si otype==None, alors retourner tous les liens
+
+ si clone==True, faire un clone des liens avant de les retourner
+ """
+ if otype is None:
+ links = []
+ for otype, tmplinks in self._links.items():
+ links.extend(tmplinks)
+ else:
+ links = listof(self._links.get(otype, ()))
+ if clone:
+ links = [link.clone() for link in links]
+ return links
+
+ # catalogue
+ def resolve_basedir(self, basedirs, dirs=False, files=False,
+ filespec=None,
+ dir_attr='dir', file_attr='file',
+ parentdir_attr='parentdir'):
+ """retourner les chemins absolus des fichiers (et/ou répertoires) trouvés dans
+ les répertoires basedirs
+
+ si les arguments dir_attr, file_attr, parentdir_attr ne sont pas None
+ (ce qui est le cas par défaut), alors l'attribut est mis à jour avec
+ respectivement les répertoires, les fichiers, et les répertoires parent
+ trouvés
+ """
+ filespecs = listof(filespec, None)
+ result = []
+ for basedir in basedirs:
+ basedir = path.expanduser(basedir)
+ basedir = path.abspath(basedir)
+ for name in os.listdir(basedir):
+ if filespecs is not None:
+ found = False
+ for filespec in filespecs:
+ if fnmatch(name, filespec):
+ found = True
+ break
+ if not found: continue
+ pf = path.join(basedir, name)
+ if path.isdir(pf) and (dirs or dirs == files):
+ result.append(pf)
+ if dir_attr is not None:
+ self.update(dir_attr, pf)
+ elif path.isfile(pf) and (files or dirs == files):
+ result.append(pf)
+ if file_attr is not None:
+ self.update(file_attr, pf)
+ if parentdir_attr is not None:
+ self.update(parentdir_attr, map(path.dirname, result))
+ return result
+ def resolve_filespec(self, filespecs, dirs=False, files=False,
+ dir_attr='dir', file_attr='file',
+ parentdir_attr='parentdir'):
+ """retourner les chemins absolus des fichiers (et/ou répertoires) correspondant
+ aux modèles filespecs (qui doivent être de type glob)
+
+ si les arguments dir_attr, file_attr, parentdir_attr ne sont pas None
+ (ce qui est le cas par défaut), alors l'attribut est mis à jour avec
+ respectivement les répertoires, les fichiers, et les répertoires parent
+ trouvés
+ """
+ result = []
+ for filespec in filespecs:
+ filespec = path.expanduser(filespec)
+ for file in glob(filespec):
+ pf = path.abspath(file)
+ if path.isdir(pf) and (dirs or dirs == files):
+ result.append(pf)
+ if dir_attr is not None:
+ self.update(dir_attr, pf)
+ elif path.isfile(pf) and (files or dirs == files):
+ result.append(pf)
+ if file_attr is not None:
+ self.update(file_attr, pf)
+ if parentdir_attr is not None:
+ self.update(parentdir_attr, map(path.dirname, result))
+ return result
+
+ def _resolve(self, catalog):
+ """à surcharger dans les classes dérivées"""
+ values = []
+ search_basedir = self.get('search_basedir', ())
+ files = 'files' in search_basedir
+ dirs = 'dirs' in search_basedir
+ basedir = self.get('basedir', None)
+ if basedir is not None:
+ values.extend(self.resolve_basedir(basedir, files=files, dirs=dirs))
+ dirspec = self.get('dirspec', None)
+ if dirspec is not None:
+ values.extend(self.resolve_filespec(dirspec, dirs=True))
+ filespec = self.get('filespec', None)
+ if filespec is not None:
+ values.extend(self.resolve_filespec(filespec, files=True))
+ if not self.values:
+ self.values = values
+
+ def resolve(self, catalog, recursive=True):
+ """normaliser cet objet et compléter les données manquantes. si recursive==True
+ (la valeur par défaut), normaliser aussi les objets liés.
+
+ @return True si l'objet a été modifié, False si l'objet avait déjà été résolu
+ """
+ if self._resolved: return False
+ self._resolve(catalog)
+ if recursive:
+ for otype, links in self.links.items():
+ for link in links:
+ link.resolve(catalog)
+ self.__dict__['_resolved'] = True
+ return True
+
+ # divers
+ def _dump_idtype(self, indent):
+ print "%s%s:%s" % (indent, self._otype, self._oid)
+ def _dump_values(self, indent):
+ values = self._values
+ if len(values) == 0:
+ pass
+ elif len(values) == 1:
+ print "%s values=%s" % (indent, repr(values[0]))
+ else:
+ print "%s values=(%s)" % (indent, ', '.join(map(repr, values)))
+ def _dump_attrs(self, indent):
+ attrs = self._attrs
+ missing_attrs = self.missing_attrs
+ if attrs or missing_attrs:
+ print "%s attrs:" % indent
+ for name, values in attrs.items():
+ if len(values) == 1:
+ print "%s %s=%s" % (indent, name, repr(values[0]))
+ else:
+ print "%s %s=(%s)" % (indent, name, ', '.join(map(repr, values)))
+ for name in missing_attrs:
+ print "%s %s=" % (indent, name)
+ def _dump_links(self, indent):
+ if self.links:
+ for ltype, links in self.links.items():
+ for link in links:
+ link.dump("%s " % indent, '+->')
+ def dump(self, indent=''):
+ """Afficher l'identifiant, le type, les valeurs, les attributs et les liens de cet objet
+ """
+ self._dump_idtype(indent)
+ self._dump_values(indent)
+ self._dump_attrs(indent)
+ self._dump_links(indent)
+
+ def __repr__(self):
+ oid = repr(self._oid)
+ values = self._values
+ if values: values = ", %s" % ', '.join(map(repr, values))
+ else: values = ""
+ attrs = self._attrs
+ if attrs: attrs = ", **%s" % repr(attrs)
+ else: attrs = ""
+ return "%s(%s%s%s)" % (self.__class__.__name__, oid, values, attrs)
+
+################################################################################
+# Faits
+
+class Fact(object):
+ """Un fait liant deux références d'objets
+
+ Le fait a le type de l'objet source (propriété `sotype`), son identifiant
+ (propriété `soid`), le verbe décrivant le lien (propriété `verb`), le type
+ de l'objet cible (propriété `totype`), son identifiant (propriété `toid`),
+ et des attributs multivalués (toutes les autres propriétés)
+ """
+
+ ATTRS = dict(
+ sotype=None, soid=None,
+ verb=None,
+ totype=None, toid=None,
+ attrs=None,
+ )
+
+ _rw_attrs = set(('sotype', 'soid', 'verb', 'totype', 'toid'))
+ _ro_attrs = set(('attrs',))
+ _reserved_attrs = _rw_attrs | _ro_attrs
+
+ _sotype = None
+ _soid = None
+ _verb = None
+ _totype = None
+ _toid = None
+ _attrs = None
+
+ def __init__(self, sotype=None, soid=None, verb=None, totype=None, toid=None, **attrs):
+ if verb.startswith('~'):
+ verb = verb[1:]
+ tmpotype, tmpoid = totype, toid
+ totype, toid = sotype, soid
+ sotype, soid = tmpotype, tmpoid
+ self.__dict__['_sotype'] = sotype
+ self.__dict__['_soid'] = soid
+ self.__dict__['_verb'] = verb
+ self.__dict__['_totype'] = totype
+ self.__dict__['_toid'] = toid
+ self.__dict__['_attrs'] = {}
+ for attr, value in attrs.items():
+ self.update(attr, value)
+
+ def __parse(self, attr, value):
+ """obtenir le parser qui permet de s'assurer que value est dans le bon
+ format pour l'attribut attr.
+ """
+ if isindex(attr): parser = None
+ else: parser = self.ATTRS.get(attr, None)
+ if parser is None: return value
+ elif isseq(value): return flattenseq(map(parser.parse, value))
+ else: return parser.parse(value)
+
+ # accès aux attributs
+ def __getattr__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ try:
+ return self._attrs[attr]
+ except KeyError:
+ raise AttributeError(attr)
+ def __setattr__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr in self._rw_attrs:
+            return super(Fact, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise AttributeError(attr)
+ else:
+ self._attrs[attr] = listof(value)
+ def __delattr__(self, attr):
+ if attr in self._reserved_attrs:
+ raise AttributeError(attr)
+ try: del self._attrs[attr]
+ except KeyError: raise AttributeError(attr)
+ def __getitem__(self, attr):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ else:
+ return self._attrs[attr]
+ def __setitem__(self, attr, value):
+ value = self.__parse(attr, value)
+ if attr in self._rw_attrs:
+            return super(Fact, self).__setattr__('_%s' % attr, value)
+ elif attr in self._ro_attrs:
+ raise KeyError(attr)
+ else:
+ self._attrs[attr] = listof(value)
+ def __delitem__(self, attr):
+ if attr in self._reserved_attrs: raise KeyError(attr)
+ else: del self._attrs[attr]
+
+ def first(self, attr, default=None):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ if self._attrs.has_key(attr):
+ values = self._attrs[attr]
+ if values: return values[0]
+ return default
+ def get(self, attr, default=None):
+ if attr in self._reserved_attrs:
+ return getattr(self, '_%s' % attr)
+ else:
+ return self._attrs.get(attr, default)
+ def has_key(self, attr):
+ """tester l'existence d'un attribut"""
+ if attr in self._reserved_attrs:
+ return True
+ else:
+ return self._attrs.has_key(attr)
+ @property
+ def known_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma"""
+ return sorted(list(self.ATTRS.keys()))
+ @property
+ def misc_attrs(self):
+ """obtenir une liste triée d'attributs ne faisant pas partie du schéma"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(defined_attrs - schema_attrs))
+ @property
+ def missing_attrs(self):
+ """obtenir une liste triée d'attributs faisant partie du schéma mais non définis"""
+ schema_attrs = set(self.ATTRS.keys())
+ defined_attrs = set(self._attrs.keys())
+ return sorted(list(schema_attrs - defined_attrs - self._reserved_attrs))
+ def update(self, attr, value=None, update_type=ADD_UNIQUE):
+ """mettre à jour l'attribut spécifié
+
+ si l'attribut n'existe pas, il est créé. sinon, la liste des valeurs de
+ l'attribut est étendue.
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés.
+
+ update_type est la méthode de mise à jour
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.update(attr, value, update_type)
+ return self
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ if value is not None:
+ values = listof(self.__parse(attr, value))
+ if not self._attrs.has_key(attr): self._attrs[attr] = []
+ attr = self._attrs[attr]
+ if update_type is ADD_UNIQUE:
+ for value in values:
+ if value not in attr:
+ attr.append(value)
+ elif update_type is ADD:
+ attr.extend(values)
+ elif update_type is REMOVE:
+ for value in values:
+ if value in attr:
+ attr.remove(value)
+ elif update_type is RESET_ADD:
+ attr[:] = values
+ return self
+ def set_defaults(self, attr, value=None, update_type=ADD_UNIQUE):
+ """Mettre à jour l'attribut spécifié s'il n'existe pas
+
+ si value==None, aucune mise à jour n'est effectuée
+
+ si attr est une instance de dictionnaire, mettre à jour *tous* les
+ attributs spécifiés s'ils n'existent pas.
+ """
+ if isinstance(attr, dict):
+ attrs = attr
+ for attr, value in attrs.items():
+ self.set_defaults(attr, value, update_type)
+ return self
+ if attr in self._reserved_attrs:
+ raise KeyError(attr)
+ if not self._attrs.has_key(attr):
+ self.update(attr, value, update_type)
+ return self
+
+ def clone(self):
+ """cloner ce lien"""
+ return self.__class__(self._sotype, self._soid, self._verb, self._totype, self._toid, **self._attrs)
+
+ # catalogue
+ def sresolve(self, catalog, default=_RAISE_EXCEPTION, create=True, resolve=True):
+ return catalog.get(self.sotype, self.soid, default, create, resolve)
+
+ def tresolve(self, catalog, default=_RAISE_EXCEPTION, create=True, resolve=True):
+ return catalog.get(self.totype, self.toid, default, create, resolve)
+
+ def resolve(self, catalog, default=_RAISE_EXCEPTION, create=True, resolve=True):
+ """obtenir les objets liés (source, verb, target)
+ """
+ source = catalog.get(self.sotype, self.soid, default, create, resolve)
+ target = catalog.get(self.totype, self.toid, default, create, resolve)
+ return (source, self.verb, target)
+
+ # divers
+ def _dump_idtype(self, indent, prefix=None):
+ if prefix is None: prefix = ''
+ else: prefix = "%s " % prefix
+ print "%s%s%s:%s %s %s:%s " % (indent, prefix, self._sotype, self._soid, self._verb, self._totype, self._toid)
+ def _dump_attrs(self, indent):
+ attrs = self._attrs
+ missing_attrs = self.missing_attrs
+ if attrs or missing_attrs:
+ print "%s attrs:" % indent
+ for name, values in attrs.items():
+ if len(values) == 1:
+ print "%s %s=%s" % (indent, name, repr(values[0]))
+ else:
+ print "%s %s=(%s)" % (indent, name, ', '.join(map(repr, values)))
+ for name in missing_attrs:
+ print "%s %s=" % (indent, name)
+ def dump(self, indent='', prefix=None):
+ """Afficher l'identifiant, le type et les attributs de ce lien
+ """
+ self._dump_idtype(indent, prefix)
+ self._dump_attrs(indent)
+
+################################################################################
+# variables globales
+
+catalog = Catalog()
diff --git a/lib/nulib/python/deploydb/parser.py b/lib/nulib/python/deploydb/parser.py
new file mode 100644
index 0000000..0ab2c51
--- /dev/null
+++ b/lib/nulib/python/deploydb/parser.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Parser pour un fichier de configuration
+
+Syntaxe:
+~~~
+# comment
+object id var=value
+ continuation="line starting with a space"
+ -link otherid
+
+-link objectref1 predicate -link objectref2
+~~~
+"""
+
+__all__ = (
+ 'split_namev', 'split_nvalue', 'split_nlist',
+ 'Parser',
+)
+
+import logging; log = logging.getLogger(__name__)
+import sys, re
+
+from .utils import *
+from .lexer import *
+from .objects import *
+
+RE_NAMETYPE = re.compile(r'(\S+):(\w+)$')
+RE_NAMEVALUE = re.compile(r'(\S+)=(\S*)')
+
+def split_namev(arg, sep=None):
+ """spliter un argument de la forme
+ name[method][=value]
+
+ Si value n'est pas spécifié, il vaut None
+ method peut être % (reset), + (add), - (del) et sa valeur par défaut est ADD_UNIQUE
+
+ si sep is not None, splitter values sur cette valeur
+ """
+ if '=' in arg:
+ name, value = arg.split('=', 1)
+ if sep is not None:
+ value = value.split(sep)
+ else:
+ name = arg
+ value = None
+ if name.endswith('%'):
+ name = name[:-1]
+ method = RESET_ADD
+ elif name.endswith('+'):
+ name = name[:-1]
+ method = ADD
+ elif name.endswith('-'):
+ name = name[:-1]
+ method = REMOVE
+ else:
+ method = ADD_UNIQUE
+ mo = RE_NAMETYPE.match(name)
+ if mo is not None:
+ name, type = mo.groups()
+ else:
+ type = None
+ return name, value, method
+
+def split_nvalue(arg):
+ """spliter un argument de la forme
+ [name=]value
+
+ Si name n'est pas spécifié, il vaut None
+ """
+ if '=' in arg:
+ name, value = arg.split('=', 1)
+ else:
+ name = None
+ value = arg
+ return name, value
+
+def split_nlist(arg):
+ """spliter un argument de la forme
+ [name=]values
+
+ Si name n'est pas spécifié, il vaut None
+ values est un ensemble de valeurs séparées par des virgules
+ """
+ if '=' in arg:
+ name, values = arg.split('=', 1)
+ values = values.split(',')
+ else:
+ name = None
+ values = arg.split(',')
+ return name, values
+
+def split_list(arg):
+ """spliter un argument de la forme
+ values
+
+ values est un ensemble de valeurs séparées par des virgules
+ """
+ return arg.split(',')
+
+class LoneError(ValueError):
+ """Exception lancée quand un verbe attend un argument inexistant
+ """
+
+class Parser(object):
+ args = None
+
+ def __init__(self, file=None):
+ if file is not None:
+ self.parse(file=file)
+
+ def parse(self, predicates=None, file=None):
+ if file is not None:
+ lexer = Lexer(file)
+ predicates = lexer.get_predicates()
+ for args in predicates:
+ self.args = args
+ if self.isa_link(): self.handle_fact()
+ else: self.handle_object()
+ return self
+
+ def eop(self):
+ return not self.args
+ def isa_verb(self):
+ return self.args and not self.args[0].startswith('-')
+ def isa_link(self):
+ return self.args and self.args[0].startswith('-')
+ def isa_namevalue(self):
+ return self.args and RE_NAMEVALUE.match(self.args[0]) is not None
+
+ def pop(self, desc=None):
+ arg = self.args.pop(0)
+ if desc is not None and self.eop():
+ log.warning("lone %s '%s' was ignored", desc, arg)
+ raise LoneError(arg)
+ return arg
+ def pop_link(self, desc=None):
+ if not self.isa_link():
+ raise ValueError("expected -otype")
+ return self.pop(desc)[1:]
+ def pop_namev(self, sep=None):
+ return split_namev(self.pop(), sep)
+ def pop_nvalue(self):
+ return split_nvalue(self.pop())
+ def pop_nlist(self):
+ return split_nlist(self.pop())
+ def pop_list(self):
+ return split_list(self.pop())
+
+ def handle_object(self):
+ # créer ou mettre à jour un objet
+ try:
+ otype = self.pop("object type")
+ if self.isa_namevalue():
+ oid, values, method = self.pop_namev(',')
+ else:
+ oid = self.pop()
+ values = ()
+ method = ADD_UNIQUE
+ object = catalog.create_object(otype, oid)
+ object.update('values', values, method)
+ while not self.isa_link():
+ if self.eop(): break
+ name, value, method = self.pop_namev()
+ if value is None: value = 'true'
+ object.update(name, value, method)
+ while self.isa_link():
+ ltype = self.pop_link()
+ lids = self.pop_list()
+ links = [object.linkto(lid, ltype) for lid in lids]
+ while not self.isa_link():
+ if self.eop(): break
+ name, value, method = self.pop_namev()
+ if value is None: value = 'true'
+ for link in links:
+ link.update(name, value, method)
+ except LoneError:
+ pass
+
+ def handle_fact(self):
+ # créer un ou plusieurs liens
+ try:
+ sotype = self.pop_link("source link type")
+ soids = self.pop_list()
+ defo = Object() # pour les attributs par défaut
+ while self.isa_namevalue():
+ name, value, method = self.pop_namev()
+ if value is None: value = 'true'
+ defo.update(name, value, method)
+ while not self.eop():
+ verb = self.pop("verb")
+ totype = self.pop_link("dest link type")
+ toids = self.pop_list()
+ facts = []
+ for soid in soids:
+ for toid in toids:
+ fact = catalog.create_fact(sotype, soid, verb, totype, toid)
+ fact.update(defo.attrs)
+ facts.append(fact)
+ while self.isa_namevalue():
+ name, value, method = self.pop_namev()
+ if value is None: value = 'true'
+ for fact in facts:
+ fact.update(name, value, method)
+ except LoneError:
+ pass
+
+ def parse_attrs(self, namevalues, object):
+ """analyser une liste de définition d'attributs name=value et mettre à jour
+ object
+ """
+ self.args = listof(namevalues)
+ while not self.eop():
+ name, value, method = self.pop_namev()
+ if value is None: value = 'true'
+ object.update(name, value, method)
diff --git a/lib/nulib/python/deploydb/toinst_module.py b/lib/nulib/python/deploydb/toinst_module.py
new file mode 100644
index 0000000..d4b7a0e
--- /dev/null
+++ b/lib/nulib/python/deploydb/toinst_module.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'Webapp',
+ 'webapp_matcher', 'webappname_matcher',
+)
+
+import logging; log = logging.getLogger(__name__)
+import os, sys
+from os import path
+
+from .utils import *
+from .expr import *
+from .objects import XT, fileP, pathP, mpathP, lowerP, Object, catalog
+from .parser import Parser
+from .base_module import withdomain, host_matcher, hostname_matcher
+
+################################################################################
+# Webapp
+
+def match_webapp(qwebapp, object):
+ if withpath(qwebapp): # webapp avec chemin
+ return qwebapp in object.get('webapp', ())
+ else: # nom de webapp
+ return qwebapp in object.get('webappname', ())
+def webapp_matcher(qwebapp):
+ return lambda object: match_webapp(qwebapp, object)
+
+def match_webappname(qwebapp, object):
+ qwebapp = path.basename(qwebapp)
+ return qwebapp in object.get('webappname', ())
+def webappname_matcher(qwebapp):
+ return lambda object: match_webappname(qwebapp, object)
+
+class Webapp(Object):
+ ATTRS = XT(Object,
+ values=pathP, webapp=mpathP, webappdir=pathP,
+ basedir=pathP, dirspec=fileP)
+
+ def _resolve(self, catalog):
+ if self.oid == '*': return
+ default = catalog.get(self.otype, '*', None, False)
+
+ webapps = self.get('webapp', [])
+ basedir = self.get('basedir', None)
+ if basedir is not None:
+ webapps.extend(self.resolve_basedir(basedir, dirs=True))
+ dirspec = self.get('dirspec', None)
+ if dirspec is not None:
+ webapps.extend(self.resolve_filespec(dirspec, dirs=True))
+
+ if webapps:
+ # générer webappdir et webappname à partir de webapp
+ webappdirs = [path.dirname(webapp) for webapp in webapps]
+ if webappdirs: webappdirs = self.webappdir = ulistof(webappdirs)
+
+ webappnames = [path.basename(webapp) for webapp in webapps]
+ if webappnames: webappnames = self.webappname = ulistof(webappnames)
+
+ else:
+ # générer webapps à partir de webappdir et webappname
+ webappdirs = self.get('webappdir', ())
+ if not webappdirs and default is not None:
+ webappdirs = default.get('webappdir', ())
+ if webappdirs: webappdirs = self.webappdir = ulistof(webappdirs)
+
+ webappnames = self.get('webappname', ())
+ if not webappnames: webappnames = [self.oid]
+ if webappnames: webappnames = self.webappname = ulistof(webappnames)
+
+ if webappdirs:
+ webapps = []
+ for webappname in webappnames:
+ found = []
+ for webappdir in webappdirs:
+ webapp = path.join(webappdir, webappname)
+ if path.exists(webapp):
+ found.append(webapp)
+ break
+ if not found:
+ found = [path.join(webappdirs[0], webappname)]
+ webapps.extend(found)
+ else:
+ webapps = webappnames
+ if webapps: webapps = self.webapp = ulistof(webapps)
+
+ if not self.values:
+ self.values = webapps
+
+################################################################################
+# Actions
+
+def option_choice(yesoption, nooption):
+ def func(value, *ignored):
+ if istrue(value): return yesoption
+ else: return nooption
+ return func
+
+def pffprofile_support(value, fact, webapp):
+ pffprofile = fact.get('pffprofile', None)
+ if pffprofile is None: pffprofile = webapp.get('pffprofile', None)
+ if pffprofile is None: return None
+ if value == 'ensure':
+ return ['--ensure-pffprofile', pffprofile[0]]
+ elif value == 'set':
+ return ['--set-pffprofile', pffprofile[0]]
+
+TOINST_ATTRS = {
+ 'tomcat_profile': dict(
+ option='--config-profile',
+ ), 'catalina_base': dict(
+ option='--catalina-base',
+ ), 'tomcat_user': dict(
+ option='--tomcat-user',
+ ), 'tomcat_group': dict(
+ option='--tomcat-group',
+ ), 'tomcat_version': dict(
+ option='--tomcat-version',
+ ), 'manager_url': dict(
+ option='--manager-url',
+ ), 'manager_user': dict(
+ option='--manager-user',
+ ), 'manager_password': dict(
+ option='--manager-password',
+ ), 'wamap': dict(
+ option='--wamap',
+ multiple=True,
+ flattensep=',',
+ ), 'exclude': dict(
+ option='--exclude',
+ multiple=True,
+ ), 'exclude_override': dict(
+ option='--replace-excludes',
+ multiple=True,
+ ), 'protect': dict(
+ option='--protect',
+ multiple=True,
+ ), 'rsync_option': dict(
+ option='--rsync-option',
+ multiple=True,
+ ), 'rsync_option_override': dict(
+ option='--replace-rsync-options',
+ multiple=True,
+ ), 'backup': dict(
+ func=option_choice('--backup', '--no-backup'),
+ ), 'restart': dict(
+ func=option_choice('--restart', '--no-restart'),
+ ), 'legacy_sort': dict(
+ func=option_choice('--legacy-sort', None),
+ ), 'pffprofile_support': dict(
+ func=pffprofile_support,
+ ),
+}
+
+def query_rtoinst(*args):
+ """afficher la commande pour déployer avec la commande $1 la webapp $2 sur
+ l'hôte $3 dans le profil $4 ou le profil pff $5
+
+ $1 doit valoir 'rtoinst' ou être un chemin vers ce script
+
+ $2 peut être
+ * un nom de webapp: toutes les webapps de ce nom sont sélectionnés
+ * un chemin complet: si une webapp avec le chemin complet est trouvée, ne
+      sélectionner que celle-là, sinon faire comme si on n'avait spécifié que le
+ nom de la webapp
+ * non spécifié: toutes les webapps devant être déployé sur l'hôte sont
+ cherchées
+
+ $3 peut être
+    * un nom d'hôte: tous les hôtes de ce nom sont sélectionnés
+ * un nom d'hôte pleinement qualifié: si le nom d'hôte pleinement qualifié
+ est trouvé, ne sélectionner que celui-là, sinon faire comme si on n'avait
+ spécifié que le nom d'hôte
+ * non spécifié: tous les hôtes vers lequel doit être déployé le webapp sont
+ cherchés
+
+ $4 peut valoir
+ * 'NONE': seuls les déploiements sans profils définis sont sélectionnés.
+ c'est la valeur par défaut.
+ * 'ALL' ou '': ne pas tenir compte du profil lors de la sélection des
+ webapps et des hôtes
+ * toute autre valeur, e.g prod ou test: seuls les déploiements de ce profil
+ sont sélectionnés
+ Il est possible de spécifier plusieurs profils en les séparant par des
+ virgules. Par exemple, 'NONE,prod' permet de sélectionner les déploiements
+ sans profil ou dans le profil 'prod'
+
+ $5 peut valoir
+    * 'NONE': seuls les déploiements sans profils pff définis sont sélectionnés.
+ * 'ALL' ou '': ne pas tenir compte du profil pff lors de la sélection des
+ webapps et des hôtes. c'est la valeur par défaut.
+ * toute autre valeur, e.g prod ou test: seuls les déploiements de ce profil
+ pff sont sélectionnés.
+
+ la webapp, ou l'hôte, ou les deux sont requis. le profil et le profil pff
+ sont facultatifs.
+
+    Les valeurs $6..$* sont des définitions d'attributs utilisées pour mettre à
+ jour les faits trouvés. Les mappings suivants sont supportés:
+
+ = attribut = = option de toinst =
+ tomcat_profile --config-profile
+ catalina_base --catalina-base
+ tomcat_user --tomcat-user
+ tomcat_group --tomcat-group
+ tomcat_version --tomcat-version
+ manager_url --manager-url
+ manager_user --manager-user
+ manager_password --manager-password
+ wamap --wamap
+ exclude --exclude
+ exclude_override --replace-excludes
+ protect --protect
+ rsync_option --rsync-option
+ rsync_option_override --replace-rsync-options
+ backup --backup / --no-backup
+ restart --restart / --no-restart
+ legacy_sort --legacy-sort
+ pffprofile_support --ensure-pffprofile / --set-pffprofile
+ """
+ rtoinst = args[0] if args[0:1] else None
+ if rtoinst is not None and (rtoinst == 'rtoinst' or rtoinst.endswith('/rtoinst')):
+ verb = 'rtoinst'
+ else:
+ raise ValueError("Le verbe est requis et doit valoir 'rtoinst'")
+ qwebapp = args[1:2] and args[1] or None
+ qhost = args[2:3] and args[2] or None
+ qprofile = args[3] if args[3:4] else 'NONE'
+ qpffprofile = args[4] if args[4:5] else 'ALL'
+ supplattrs = args[5:]
+
+ if not qwebapp and not qhost:
+ raise ValueError("Il faut spécifier webapp et/ou host")
+
+ if not qwebapp:
+ webapps = None
+ elif cwithpath(qwebapp):
+ qwebapp = path.abspath(qwebapp)
+ webapps = catalog.find_objects('webapp', expr=webapp_matcher(qwebapp))
+ if not webapps:
+ webapps = catalog.find_objects('webapp', expr=webappname_matcher(qwebapp))
+ else:
+ webapps = catalog.find_objects('webapp', expr=webappname_matcher(qwebapp))
+
+ if not qhost:
+ hosts = None
+ else:
+ if cwithpath(qhost):
+ qhost = path.basename(path.abspath(qhost))
+ if withdomain(qhost):
+ hosts = catalog.find_objects('host', expr=host_matcher(qhost))
+ if not hosts:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+ else:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+
+ if qprofile == '': qprofile = 'ALL'
+ qprofiles = flattenstr([qprofile])
+ if 'ALL' in qprofiles:
+ qprofile = None
+ else:
+ expr = []
+ for qprofile in qprofiles:
+ if qprofile == 'NONE':
+ qprofile = NONE(EXISTS('profile'))
+ else:
+ qprofile = dict(profile=qprofile)
+ expr.append(qprofile)
+ qprofile = ANY(*expr)
+
+ if qpffprofile == '': qpffprofile = 'ALL'
+ qpffprofiles = flattenstr([qpffprofile])
+ if 'ALL' in qpffprofiles:
+ qpffprofile = None
+ else:
+ expr = []
+ for qpffprofile in qpffprofiles:
+ if qpffprofile == 'NONE':
+ qpffprofile = NONE(EXISTS('pffprofile'))
+ else:
+ qpffprofile = dict(pffprofile=qpffprofile)
+ expr.append(qpffprofile)
+ qpffprofile = ANY(*expr)
+
+ if qprofile is None and qpffprofile is None:
+ expr = None
+ elif qprofile is not None and qpffprofile is not None:
+ expr = ALL(qprofile, qpffprofile)
+ elif qprofile is not None:
+ expr = qprofile
+ elif qpffprofile is not None:
+ expr = qpffprofile
+
+ # webapps et hosts sont spécifiés
+ if webapps is not None and hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='webapp', tsexpr=dict(oid=[webapp.oid for webapp in webapps]),
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=expr,
+ )
+
+ # Seuls les webapps sont spécifiés: chercher les hôtes
+ elif webapps is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='webapp', tsexpr=dict(oid=[webapp.oid for webapp in webapps]),
+ ttotype='host',
+ expr=expr,
+ )
+
+ # Seuls les hôtes sont spécifiés: chercher les webapps
+ elif hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='webapp',
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=expr,
+ )
+
+ # afficher la commande
+ if supplattrs: parser = Parser()
+ for fact, tsobjects, ttobjects in facts:
+ if supplattrs: parser.parse_attrs(supplattrs, fact)
+ hs = ':'.join(flattenseq([host.host for host in ttobjects]))
+ for webapp in tsobjects:
+ # construire les options de toinst. on prend les valeurs d'abord dans le
+ # fait puis dans l'objet webapp.
+ options = []
+ names = set(fact.attrs.keys())
+ names.update(webapp.attrs.keys())
+ for name in names:
+ values = fact.get(name, None)
+ factvalue = True
+ if values is None:
+ values = webapp.get(name, None)
+ factvalue = False
+ if values is None:
+ # ne devrait pas se produire en principe
+ continue
+ if name in ('profile', 'pffprofile'):
+ # les attributs de sélection du profil ont été déjà été traités
+ # plus haut
+ continue
+ params = TOINST_ATTRS.get(name, None)
+ if params is None:
+ if factvalue:
+ log.warning("ignoring %s option %s=%r", fact.verb, name, values)
+ else:
+ func = params.get('func', None)
+ option = params.get('option', None)
+ if func is not None:
+ option = func(values[0], fact, webapp)
+ if option is not None:
+ options.extend(listof(option))
+ elif option is not None:
+ if params.get('multiple', False):
+ flattensep = params.get('flattensep', None)
+ if flattensep is not None:
+ values = flattenstr(values, flattensep)
+ for value in values:
+ options.append(option)
+ options.append(qshell(value))
+ else:
+ options.append(option)
+ options.append(qshell(values[0]))
+ else:
+ raise ValueError("missing option key for attribute %s" % name)
+
+ for w in webapp.webapp:
+ # préférer si possible le chemin fourni par l'utilisateur
+ if withpath(qwebapp): w = qwebapp
+ parts = [rtoinst, '--no-deploydb', '-yh', qshell(hs), qshell(w)]
+ if options:
+ parts.append('--')
+ parts.extend(options)
+ print ' '.join(parts)
diff --git a/lib/nulib/python/deploydb/uinst_module.py b/lib/nulib/python/deploydb/uinst_module.py
new file mode 100644
index 0000000..615fe8a
--- /dev/null
+++ b/lib/nulib/python/deploydb/uinst_module.py
@@ -0,0 +1,238 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'Module',
+ 'module_matcher', 'modulename_matcher',
+)
+
+import logging; log = logging.getLogger(__name__)
+import os, sys
+from os import path
+
+from .utils import *
+from .expr import *
+from .objects import XT, fileP, pathP, mpathP, lowerP, Object, catalog
+from .parser import Parser
+from .base_module import withdomain, host_matcher, hostname_matcher
+
+################################################################################
+# Module
+
+def match_module(qmodule, object):
+ if withpath(qmodule): # module avec chemin
+ return qmodule in object.get('module', ())
+ else: # nom de module
+ return qmodule in object.get('modulename', ())
+def module_matcher(qmodule):
+ return lambda object: match_module(qmodule, object)
+
+def match_modulename(qmodule, object):
+ qmodule = path.basename(qmodule)
+ return qmodule in object.get('modulename', ())
+def modulename_matcher(qmodule):
+ return lambda object: match_modulename(qmodule, object)
+
+class Module(Object):
+ ATTRS = XT(Object,
+ values=pathP, module=mpathP, moduledir=pathP,
+ basedir=pathP, dirspec=fileP)
+
+ def _resolve(self, catalog):
+ if self.oid == '*': return
+ default = catalog.get(self.otype, '*', None, False)
+
+ modules = self.get('module', [])
+ basedir = self.get('basedir', None)
+ if basedir is not None:
+ modules.extend(self.resolve_basedir(basedir, dirs=True))
+ dirspec = self.get('dirspec', None)
+ if dirspec is not None:
+ modules.extend(self.resolve_filespec(dirspec, dirs=True))
+
+ if modules:
+ # générer moduledir et modulename à partir de module
+ moduledirs = [path.dirname(module) for module in modules]
+ if moduledirs: moduledirs = self.moduledir = ulistof(moduledirs)
+
+ modulenames = [path.basename(module) for module in modules]
+ if modulenames: modulenames = self.modulename = ulistof(modulenames)
+
+ else:
+ # générer modules à partir de moduledir et modulename
+ moduledirs = self.get('moduledir', ())
+ if not moduledirs and default is not None:
+ moduledirs = default.get('moduledir', ())
+ if moduledirs: moduledirs = self.moduledir = ulistof(moduledirs)
+
+ modulenames = self.get('modulename', ())
+ if not modulenames: modulenames = [self.oid]
+ if modulenames: modulenames = self.modulename = ulistof(modulenames)
+
+ if moduledirs:
+ modules = []
+ for modulename in modulenames:
+ found = []
+ for moduledir in moduledirs:
+ module = path.join(moduledir, modulename)
+ if path.exists(module):
+ found.append(module)
+ break
+ if not found:
+ found = [path.join(moduledirs[0], modulename)]
+ modules.extend(found)
+ else:
+ modules = modulenames
+ if modules: modules = self.module = ulistof(modules)
+
+ if not self.values:
+ self.values = modules
+
+################################################################################
+# Actions
+
+def query_xuinst(*args):
+ """afficher la commande pour déployer avec la commande $1 le module $2 sur
+ l'hôte $3 dans le profil $4
+
+ $1 peut valoir 'ruinst' ou 'uinst' ou être un chemin vers l'un de ces deux
+ scripts
+
+ $2 peut être
+    * un nom de module: tous les modules de ce nom sont sélectionnés
+ * un chemin complet: si un module avec le chemin complet est trouvé, ne
+      sélectionner que celui-là, sinon faire comme si on n'avait spécifié que le
+ nom du module
+    * non spécifié: tous les modules devant être déployés sur l'hôte sont
+ cherchés
+
+ $3 peut être
+    * un nom d'hôte: tous les hôtes de ce nom sont sélectionnés
+ * un nom d'hôte pleinement qualifié: si le nom d'hôte pleinement qualifié
+ est trouvé, ne sélectionner que celui-là, sinon faire comme si on n'avait
+ spécifié que le nom d'hôte
+ * non spécifié: tous les hôtes vers lequel doit être déployé le module sont
+ cherchés
+
+ $4 peut valoir
+ * 'NONE': seuls les déploiements sans profils définis sont sélectionnés.
+ c'est la valeur par défaut.
+ * 'ALL' ou '': ne pas tenir compte du profil lors de la sélection des
+ modules et des hôtes
+    * toute autre valeur, e.g prod ou test: seuls les déploiements de ce profil
+ sont sélectionnés
+ Il est possible de spécifier plusieurs profils en les séparant par des
+ virgules. Par exemple, 'prod,NONE' permet de sélectionner les déploiements
+ sans profil ou dans le profil 'prod'
+
+ le module, ou l'hôte, ou les deux sont requis. le profil est facultatif.
+
+ Les valeurs $5..$* sont des définitions d'attributs utilisées pour mettre à
+ jour les faits trouvés. Ces définitions sont utilisés comme argument de
+ uinst.
+ """
+ xuinst = args[0] if args[0:1] else None
+ if xuinst is None:
+ verb = None
+ elif xuinst == 'ruinst' or xuinst.endswith('/ruinst'):
+ verb = 'ruinst'
+ elif xuinst == 'uinst' or xuinst.endswith('/uinst'):
+ verb = 'uinst'
+ else:
+ verb = None
+ if verb is None:
+ raise ValueError("Le verbe est requis et doit être 'uinst' ou 'ruinst'")
+ qmodule = args[1:2] and args[1] or None
+ qhost = args[2:3] and args[2] or None
+ qprofile = args[3] if args[3:4] else 'NONE'
+ supplattrs = args[4:]
+
+ if not qmodule and not qhost:
+ raise ValueError("Il faut spécifier module et/ou host")
+
+ if not qmodule:
+ modules = None
+ elif cwithpath(qmodule):
+ qmodule = path.abspath(qmodule)
+ modules = catalog.find_objects('module', expr=module_matcher(qmodule))
+ if not modules:
+ modules = catalog.find_objects('module', expr=modulename_matcher(qmodule))
+ else:
+ modules = catalog.find_objects('module', expr=modulename_matcher(qmodule))
+
+ if not qhost:
+ hosts = None
+ else:
+ if cwithpath(qhost):
+ qhost = path.basename(path.abspath(qhost))
+ if withdomain(qhost):
+ hosts = catalog.find_objects('host', expr=host_matcher(qhost))
+ if not hosts:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+ else:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+
+ if qprofile == '': qprofile = 'ALL'
+ qprofiles = flattenstr([qprofile])
+ if 'ALL' in qprofiles:
+ qprofile = None
+ else:
+ expr = []
+ for qprofile in qprofiles:
+ if qprofile == 'NONE':
+ qprofile = NONE(EXISTS('profile'))
+ else:
+ qprofile = dict(profile=qprofile)
+ expr.append(qprofile)
+ qprofile = ANY(*expr)
+
+ # modules et hosts sont spécifiés
+ if modules is not None and hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='module', tsexpr=dict(oid=[module.oid for module in modules]),
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # Seuls les modules sont spécifiés: chercher les hôtes
+ elif modules is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='module', tsexpr=dict(oid=[module.oid for module in modules]),
+ ttotype='host',
+ expr=qprofile,
+ )
+
+ # Seuls les hôtes sont spécifiés: chercher les modules
+ elif hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='module',
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # afficher la commande
+ if supplattrs: parser = Parser()
+ for fact, tsobjects, ttobjects in facts:
+ hs = flattenseq([host.host for host in ttobjects])
+ ms = flattenseq([module.module for module in tsobjects])
+ if supplattrs: parser.parse_attrs(supplattrs, fact)
+ vars = []
+ for name, values in fact.attrs.items():
+ vars.append("%s=%s" % (name, qshell(':'.join(values))))
+ for m in ms:
+ # préférer si possible le chemin fourni par l'utilisateur
+ if withpath(qmodule): m = qmodule
+ if fact.verb == 'uinst':
+ # chaque hôte est traité à part avec uinst:rsync
+ for h in hs:
+ parts = [xuinst, '--no-deploydb', '-yh', qshell(h), qshell(m)]
+ if vars:
+ parts.extend(['--', ' '.join(vars)])
+ print ' '.join(parts)
+ elif fact.verb == 'ruinst':
+                joined_hs = ':'.join(hs)
+                parts = [xuinst, '--no-deploydb', '-h', qshell(joined_hs), qshell(m), '--', '-y']
+ if vars: parts.append(' '.join(vars))
+ print ' '.join(parts)
diff --git a/lib/nulib/python/deploydb/utils.py b/lib/nulib/python/deploydb/utils.py
new file mode 100644
index 0000000..1d0813d
--- /dev/null
+++ b/lib/nulib/python/deploydb/utils.py
@@ -0,0 +1,150 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Fonctions utilitaires diverses
+"""
+
+__all__ = (
+ 'isnum',
+ 'istrue', 'isfalse',
+ 'isseq', 'seqof', 'listof', 'ulistof',
+ 'flattenstr', 'flattenseq',
+ 'qshell',
+ 'withpath', 'cwithpath', 'find_in_path', 'relpath',
+)
+
+from os import path
+
def isnum(v):
    """Return True when v is an integral number (int or long)."""
    if isinstance(v, int):
        return True
    return isinstance(v, long)
+
def istrue(b):
    """Interpret b as a boolean.

    The words true/vrai/yes/oui/1 (case-insensitive) are True, the words
    false/faux/no/non/0 are False; anything else falls back to bool(b).
    """
    word = str(b).lower()
    if word in ('false', 'faux', 'no', 'non', '0'):
        return False
    if word in ('true', 'vrai', 'yes', 'oui', '1'):
        return True
    return bool(b)  # let python decide the value
+
def isfalse(b):
    """Interpret b as a boolean and negate it.

    The words true/vrai/yes/oui/1 (case-insensitive) are not-false, the
    words false/faux/no/non/0 are false; anything else falls back to
    not bool(b).
    """
    word = str(b).lower()
    if word in ('false', 'faux', 'no', 'non', '0'):
        return True
    if word in ('true', 'vrai', 'yes', 'oui', '1'):
        return False
    return not bool(b)  # let python decide the value
+
def isseq(t):
    """Return True when t is a sequence-like container (list, tuple or set)."""
    return isinstance(t, (list, tuple, set))
+
# sentinel marking "noneValue was not supplied"
_SEQOF_UNDEF = object()

def seqof(o, noneValue=_SEQOF_UNDEF):
    """Coerce o into a tuple.

    * if o is a sequence (list, tuple or set), return tuple(o)
    * if noneValue was supplied and o is noneValue, return noneValue
    * otherwise return the one-element tuple (o,)
    """
    if isinstance(o, (list, tuple, set)):
        return tuple(o)
    if noneValue is not _SEQOF_UNDEF and o is noneValue:
        return noneValue
    return (o,)
+
# sentinel marking "noneValue was not supplied"
_LISTOF_UNDEF = object()

def listof(o, noneValue=_LISTOF_UNDEF):
    """Coerce o into a new list.

    * if o is a sequence (list, tuple or set), return list(o)
    * if noneValue was supplied and o is noneValue, return noneValue
    * otherwise return the one-element list [o]
    """
    if isinstance(o, (list, tuple, set)):
        return list(o)
    if noneValue is not _LISTOF_UNDEF and o is noneValue:
        return noneValue
    return [o]

def ulistof(o, noneValue=_LISTOF_UNDEF):
    """Coerce o into a new list, like listof().

    The difference with listof() is that duplicates are removed while the
    original order is preserved, which set() does not guarantee.
    """
    if isinstance(o, (list, tuple, set)):
        unique = []
        for item in o:
            if item not in unique:
                unique.append(item)
        return unique
    if noneValue is not _LISTOF_UNDEF and o is noneValue:
        return noneValue
    return [o]
+
def flattenstr(src, unique=True, clean=True, sep=','):
    """Split every element of src on sep and flatten the results into a
    single list.

    If unique is True, duplicates are removed (order preserved).
    If clean is True, empty values and surrounding whitespace are removed.

    e.g flattenstr(['a , b', 'c,']) --> ['a', 'b', 'c']

    Returns None when src is None.
    """
    if src is None:
        return None
    chunks = src if isinstance(src, (list, tuple, set)) else (src,)
    dest = []
    for chunk in chunks:
        pieces = chunk.split(sep)
        if clean:
            pieces = [piece for piece in (p.strip() for p in pieces) if piece]
        if unique:
            for piece in pieces:
                if piece not in dest:
                    dest.append(piece)
        else:
            dest.extend(pieces)
    return dest
+
def flattenseq(seq):
    """Flatten one level of nesting in seq into a single list.

    e.g flattenlist([(1, 2), (3, 4), 5]) --> [1, 2, 3, 4, 5]

    Returns None when seq is None; a non-sequence becomes a one-element list.
    """
    if seq is None:
        return None
    if not isinstance(seq, (list, tuple, set)):
        return [seq]
    flat = []
    for elem in seq:
        if isinstance(elem, (list, tuple, set)):
            flat.extend(elem)
        else:
            flat.append(elem)
    return flat
+
def qshell(values):
    """Single-quote a string for safe use on a shell command line.

    A sequence is quoted element-wise (via map); a falsy value (None, '',
    0, ...) becomes the empty string.
    """
    if isinstance(values, (list, tuple, set)):
        return map(qshell, values)
    if not values:
        return ''
    return "'%s'" % values.replace("'", "'\\''")
+
def withpath(p):
    """True when p is a non-None string containing a '/' path separator."""
    if p is None:
        return False
    return '/' in p

def cwithpath(p):
    """Like withpath(), but '.' and '..' also count as paths."""
    if p is None:
        return False
    return '/' in p or p in ('.', '..')
+
def find_in_path(filename, dirs, allow_path=False):
    """Search for the file named filename in the directories dirs.

    If filename is itself a path (contains '/' or path.sep) it is returned
    unchanged, unless allow_path is True in which case the search proceeds
    normally.

    Return the full path dir/filename of the first directory containing the
    file, or None when it is found in none of them.
    """
    if ('/' in filename or path.sep in filename) and not allow_path:
        return filename

    for candidate_dir in dirs:
        candidate = path.join(candidate_dir, filename)
        if path.isfile(candidate):
            return candidate
    return None
+
def relpath(filep, refp, abspath=True):
    """Express filep relative to the directory containing refp.

    If abspath is True, the resulting path is made absolute.
    """
    # bugfix: the original called the bare name dirname(), which is never
    # imported (only 'from os import path' is) and raised NameError
    pf = path.join(path.dirname(refp), filep)
    if abspath:
        pf = path.abspath(pf)
    return pf
diff --git a/lib/nulib/python/deploydb/woinst_module.py b/lib/nulib/python/deploydb/woinst_module.py
new file mode 100644
index 0000000..1ee93e3
--- /dev/null
+++ b/lib/nulib/python/deploydb/woinst_module.py
@@ -0,0 +1,332 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'Wobundle',
+ 'wobundle_matcher', 'wobundlename_matcher',
+)
+
+import logging; log = logging.getLogger(__name__)
+import os, sys
+from os import path
+
+from .utils import *
+from .expr import *
+from .objects import XT, fileP, pathP, mpathP, lowerP, Object, catalog
+from .parser import Parser
+from .base_module import withdomain, host_matcher, hostname_matcher
+
+################################################################################
+# Wobundle
+
def match_wobundle(qwobundle, object):
    """Match object against qwobundle, by full path or by name.

    A query containing '/' is compared against the object's 'wobundle'
    paths; otherwise it is compared against its 'wobundlename' values.
    """
    if qwobundle is not None and '/' in qwobundle:  # wobundle with a path
        return qwobundle in object.get('wobundle', ())
    # bare wobundle name
    return qwobundle in object.get('wobundlename', ())

def wobundle_matcher(qwobundle):
    """Build a predicate testing objects against qwobundle."""
    def match(object):
        return match_wobundle(qwobundle, object)
    return match
+
def match_wobundlename(qwobundle, object):
    """Match object by the basename of qwobundle against its wobundle names."""
    name = path.basename(qwobundle)
    return name in object.get('wobundlename', ())

def wobundlename_matcher(qwobundle):
    """Build a predicate testing objects by wobundle basename."""
    def match(object):
        return match_wobundlename(qwobundle, object)
    return match
+
class Wobundle(Object):
    """A WebObjects bundle (.woa application or .framework) known to the
    catalog.

    The wobundle / wobundledir / wobundlename attributes are kept mutually
    consistent by _resolve(): whichever subset was provided, the others are
    derived from it, and 'values' defaults to the resolved bundle paths.
    """
    # NOTE(review): XT and the *P parsers are declared in .objects; they are
    # assumed here to describe path-valued attributes -- confirm there.
    ATTRS = XT(Object,
        values=pathP, wobundle=mpathP, wobundledir=pathP,
        basedir=pathP, dirspec=fileP)

    # directory patterns used when scanning basedir for candidate bundles
    FILESPECS = ['*.woa', '*.framework']

    def _resolve(self, catalog):
        """Derive wobundle, wobundledir, wobundlename and values from the
        explicitly provided attributes (called with the owning catalog)."""
        # '*' is the per-type default object: nothing to resolve
        if self.oid == '*': return
        # default object providing fallback values for this type
        default = catalog.get(self.otype, '*', None, False)

        wobundles = self.get('wobundle', [])
        basedir = self.get('basedir', None)
        if basedir is not None:
            wobundles.extend(self.resolve_basedir(basedir, dirs=True, filespec=self.FILESPECS))
        dirspec = self.get('dirspec', None)
        if dirspec is not None:
            wobundles.extend(self.resolve_filespec(dirspec, dirs=True))

        if wobundles:
            # derive wobundledir and wobundlename from wobundle
            wobundledirs = [path.dirname(wobundle) for wobundle in wobundles]
            if wobundledirs: wobundledirs = self.wobundledir = ulistof(wobundledirs)

            wobundlenames = [path.basename(wobundle) for wobundle in wobundles]
            if wobundlenames: wobundlenames = self.wobundlename = ulistof(wobundlenames)

        else:
            # derive wobundle from wobundledir and wobundlename
            wobundledirs = self.get('wobundledir', ())
            if not wobundledirs and default is not None:
                wobundledirs = default.get('wobundledir', ())
            if wobundledirs: wobundledirs = self.wobundledir = ulistof(wobundledirs)

            wobundlenames = self.get('wobundlename', ())
            # default bundle name is derived from the object id
            if not wobundlenames: wobundlenames = ['%s.woa' % self.oid]
            if wobundlenames: wobundlenames = self.wobundlename = ulistof(wobundlenames)

            if wobundledirs:
                wobundles = []
                for wobundlename in wobundlenames:
                    found = []
                    # keep only the first existing dir/name combination
                    for wobundledir in wobundledirs:
                        wobundle = path.join(wobundledir, wobundlename)
                        if path.exists(wobundle):
                            found.append(wobundle)
                            break
                    if not found:
                        # no existing combination: assume the first directory
                        found = [path.join(wobundledirs[0], wobundlename)]
                    wobundles.extend(found)
            else:
                wobundles = wobundlenames
            if wobundles: wobundles = self.wobundle = ulistof(wobundles)

        if not self.values:
            self.values = wobundles
+
+################################################################################
+# Actions
+
def option_choice(yesoption, nooption):
    """Return a mapper turning a truthy attribute value (per istrue()) into
    yesoption and anything else into nooption."""
    def choose(value):
        return yesoption if istrue(value) else nooption
    return choose
+
def what_choice(value):
    """Map a 'what' attribute value onto the matching woinst option.

    Returns None (and logs a warning) for an unrecognised value.
    """
    if value == 'bundle':
        return '--bundle'
    if value == 'webres':
        return '--webres'
    log.warning("invalid what value %s", value)
    return None
+
def restart_choice(value):
    """Map a 'restart' attribute value onto the matching woinst option.

    Recognised keywords: restart/stop-start -> --stop-start,
    bounce -> --bounce, no-restart -> --no-restart.  Any other value is
    interpreted as a boolean: truthy -> --stop-start, falsy -> --no-restart.
    """
    # bugfix: test the explicit keywords before the generic truthiness
    # check.  In the original, istrue(value) was evaluated first, and since
    # istrue() treats any unrecognised non-empty string as true, the
    # 'bounce' and 'no-restart' branches were unreachable.
    if value in ('bounce',):
        return '--bounce'
    if value in ('no-restart',):
        return '--no-restart'
    if istrue(value) or value in ('restart', 'stop-start'):
        return '--stop-start'
    if isfalse(value):
        return '--no-restart'
    log.warning("invalid restart value %s", value)
    return None
+
# Mapping from fact/wobundle attribute names to woinst command-line options.
# Supported keys per entry:
#   func        callable mapping the attribute value to an option (or None)
#   option      literal option string emitted before each value
#   multiple    the option may be repeated, once per value
#   flattensep  separator used to split each value before emitting options
WOINST_ATTRS = {
    'prefix': dict(
        func=option_choice('--prefix', '--no-prefix'),
    ), 'what': dict(
        func=what_choice,
    ), 'tag': dict(
        func=option_choice('--tag', '--no-tag'),
    ), 'dbconfig': dict(
        option='--active-dbconfig',
    ), 'dbconfig_map': dict(
        option='--active-dbconfig-map',
        multiple=True,
        flattensep=',',
    ), 'restart': dict(
        func=restart_choice,
    ), 'exec': dict(
        option='--exec',
        multiple=True,
    ),
}
+
+def query_rwoinst(*args):
+ """afficher la commande pour déployer avec la commande $1 le wobundle $2 sur
+ l'hôte $3 dans le profil $4
+
+ $1 doit valoir 'rwoinst' ou être un chemin vers ce script
+
+ $2 peut être
+ * un nom de wobundle: tout les wobundles de ce nom sont sélectionnés
+ * un chemin complet: si un wobundle avec le chemin complet est trouvé, ne
+ sélectinner que celui-là, sinon faire comme si on n'avait spécifié que le
+ nom du wobundle
+ * non spécifié: tout les wobundles devant être déployé sur l'hôte sont
+ cherchés
+
+ $3 peut être
+ * un nom d'hôte: tous les hôtes de ce nom sont sélectionés
+ * un nom d'hôte pleinement qualifié: si le nom d'hôte pleinement qualifié
+ est trouvé, ne sélectionner que celui-là, sinon faire comme si on n'avait
+ spécifié que le nom d'hôte
+ * non spécifié: tous les hôtes vers lequel doit être déployé le wobundle sont
+ cherchés
+
+ $4 peut valoir
+ * 'NONE': seuls les déploiements sans profils définis sont sélectionnés.
+ c'est la valeur par défaut.
+ * 'ALL' ou '': ne pas tenir compte du profil lors de la sélection des
+ wobundles et des hôtes
+ * toute autre valeur, e.g prod ou test: seuls les déploiement de ce profil
+ sont sélectionnés
+ Il est possible de spécifier plusieurs profils en les séparant par des
+ virgules. Par exemple, 'NONE,prod' permet de sélectionner les déploiements
+ sans profil ou dans le profil 'prod'
+
+ le wobundle, ou l'hôte, ou les deux sont requis. le profil est facultatif.
+
+ Les valeurs $5..$* sont des définitions d'attributs utilisées pour mettre à
+ jour les faits trouvés. Les mappings suivants sont supportés:
+
+ = attribut = = option de woinst =
+ what --bundle / --webres
+ dbconfig --active-dbconfig
+ dbconfig_map --active-dbconfig-map
+ restart --stop-start / --bounce
+ exec --exec
+ tag --tag / --no-tag
+ prefix --prefix / --no-prefix
+ destdir HTDOCSDIR=
+
+ Les autres définitions sont utilisées comme argument de woinst, pour définir
+ les valeurs des préfixes.
+ """
+ rwoinst = args[0] if args[0:1] else None
+ if rwoinst is not None and (rwoinst == 'rwoinst' or rwoinst.endswith('/rwoinst')):
+ verb = 'rwoinst'
+ else:
+ raise ValueError("Le verbe est requis et doit valoir 'rwoinst'")
+ qwobundle = args[1:2] and args[1] or None
+ qhost = args[2:3] and args[2] or None
+ qprofile = args[3] if args[3:4] else 'NONE'
+ supplattrs = args[4:]
+
+ if not qwobundle and not qhost:
+ raise ValueError("Il faut spécifier wobundle et/ou host")
+
+ if not qwobundle:
+ wobundles = None
+ elif cwithpath(qwobundle):
+ qwobundle = path.abspath(qwobundle)
+ wobundles = catalog.find_objects('wobundle', expr=wobundle_matcher(qwobundle))
+ if not wobundles:
+ wobundles = catalog.find_objects('wobundle', expr=wobundlename_matcher(qwobundle))
+ else:
+ wobundles = catalog.find_objects('wobundle', expr=wobundlename_matcher(qwobundle))
+
+ if not qhost:
+ hosts = None
+ else:
+ if cwithpath(qhost):
+ qhost = path.basename(path.abspath(qhost))
+ if withdomain(qhost):
+ hosts = catalog.find_objects('host', expr=host_matcher(qhost))
+ if not hosts:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+ else:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+
+ if qprofile == '': qprofile = 'ALL'
+ qprofiles = flattenstr([qprofile])
+ if 'ALL' in qprofiles:
+ qprofile = None
+ else:
+ expr = []
+ for qprofile in qprofiles:
+ if qprofile == 'NONE':
+ qprofile = NONE(EXISTS('profile'))
+ else:
+ qprofile = dict(profile=qprofile)
+ expr.append(qprofile)
+ qprofile = ANY(*expr)
+
+ # wobundles et hosts sont spécifiés
+ if wobundles is not None and hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wobundle', tsexpr=dict(oid=[wobundle.oid for wobundle in wobundles]),
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # Seuls les wobundles sont spécifiés: chercher les hôtes
+ elif wobundles is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wobundle', tsexpr=dict(oid=[wobundle.oid for wobundle in wobundles]),
+ ttotype='host',
+ expr=qprofile,
+ )
+
+ # Seuls les hôtes sont spécifiés: chercher les wobundles
+ elif hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wobundle',
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # afficher la commande
+ if supplattrs: parser = Parser()
+ for fact, tsobjects, ttobjects in facts:
+ if supplattrs: parser.parse_attrs(supplattrs, fact)
+ hs = ':'.join(flattenseq([host.host for host in ttobjects]))
+
+ for wobundle in tsobjects:
+ # construire les options de woinst. on prend les valeurs d'abord
+ # dans le fait puis dans l'objet wobundle.
+ options = []
+ vars = []
+ names = set(fact.attrs.keys())
+ names.update(wobundle.attrs.keys())
+ for name in names:
+ values = fact.get(name, None)
+ factvalue = True
+ if values is None:
+ values = wobundle.get(name, None)
+ factvalue = False
+ if values is None:
+ # ne devrait pas se produire en principe
+ continue
+ if name in ('profile',):
+ # les attributs de sélection du profil ont été déjà été traités
+ # plus haut
+ continue
+ elif name == 'destdir':
+ name = 'HTDOCSDIR'
+ params = WOINST_ATTRS.get(name, None)
+ if params is None:
+ if factvalue:
+ # les variables spécifiques ne sont prise que dans le
+ # fait.
+ vars.append("%s=%s" % (name, qshell(':'.join(values))))
+ else:
+ func = params.get('func', None)
+ option = params.get('option', None)
+ if func is not None:
+ option = func(values[0])
+ if option is not None:
+ options.extend(listof(option))
+ elif option is not None:
+ if params.get('multiple', False):
+ flattensep = params.get('flattensep', None)
+ if flattensep is not None:
+ values = flattenstr(values, flattensep)
+ for value in values:
+ options.append(option)
+ options.append(qshell(value))
+ else:
+ options.append(option)
+ options.append(qshell(values[0]))
+ else:
+ raise ValueError("missing option key for attribute %s" % name)
+
+ for w in wobundle.wobundle:
+ # préférer si possible le chemin fourni par l'utilisateur
+ if withpath(qwobundle): w = qwobundle
+ parts = [rwoinst, '--no-deploydb', '-yh', qshell(hs), qshell(w)]
+ if options or vars:
+ parts.append('--')
+ if options: parts.extend(options)
+ if vars: parts.extend(vars)
+ print ' '.join(parts)
diff --git a/lib/nulib/python/deploydb/wyinst_module.py b/lib/nulib/python/deploydb/wyinst_module.py
new file mode 100644
index 0000000..6adac71
--- /dev/null
+++ b/lib/nulib/python/deploydb/wyinst_module.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'Wyapp',
+ 'wyapp_matcher', 'wyappname_matcher',
+)
+
+import logging; log = logging.getLogger(__name__)
+import os, sys
+from os import path
+
+from .utils import *
+from .expr import *
+from .objects import XT, fileP, pathP, mpathP, lowerP, Object, catalog
+from .parser import Parser
+from .base_module import withdomain, host_matcher, hostname_matcher
+
+################################################################################
+# Wyapp
+
def match_wyapp(qwyapp, object):
    """Match object against qwyapp, by full path or by name.

    A query containing '/' is compared against the object's 'wyapp' paths;
    otherwise it is compared against its 'wyappname' values.
    """
    if qwyapp is not None and '/' in qwyapp:  # wyapp with a path
        return qwyapp in object.get('wyapp', ())
    # bare wyapp name
    return qwyapp in object.get('wyappname', ())

def wyapp_matcher(qwyapp):
    """Build a predicate testing objects against qwyapp."""
    def match(object):
        return match_wyapp(qwyapp, object)
    return match
+
def match_wyappname(qwyapp, object):
    """Match object by the basename of qwyapp against its wyapp names."""
    name = path.basename(qwyapp)
    return name in object.get('wyappname', ())

def wyappname_matcher(qwyapp):
    """Build a predicate testing objects by wyapp basename."""
    def match(object):
        return match_wyappname(qwyapp, object)
    return match
+
class Wyapp(Object):
    """A wyapp application known to the catalog.

    The wyapp / wyappdir / wyappname attributes are kept mutually
    consistent by _resolve(): whichever subset was provided, the others are
    derived from it, and 'values' defaults to the resolved wyapp paths.
    """
    # NOTE(review): XT and the *P parsers are declared in .objects; they are
    # assumed here to describe path-valued attributes -- confirm there.
    ATTRS = XT(Object,
        values=pathP, wyapp=mpathP, wyappdir=pathP,
        basedir=pathP, dirspec=fileP)

    def _resolve(self, catalog):
        """Derive wyapp, wyappdir, wyappname and values from the explicitly
        provided attributes (called with the owning catalog)."""
        # '*' is the per-type default object: nothing to resolve
        if self.oid == '*': return
        # default object providing fallback values for this type
        default = catalog.get(self.otype, '*', None, False)

        wyapps = self.get('wyapp', [])
        basedir = self.get('basedir', None)
        if basedir is not None:
            wyapps.extend(self.resolve_basedir(basedir, dirs=True))
        dirspec = self.get('dirspec', None)
        if dirspec is not None:
            wyapps.extend(self.resolve_filespec(dirspec, dirs=True))

        if wyapps:
            # derive wyappdir and wyappname from wyapp
            wyappdirs = [path.dirname(wyapp) for wyapp in wyapps]
            if wyappdirs: wyappdirs = self.wyappdir = ulistof(wyappdirs)

            wyappnames = [path.basename(wyapp) for wyapp in wyapps]
            if wyappnames: wyappnames = self.wyappname = ulistof(wyappnames)

        else:
            # derive wyapp from wyappdir and wyappname
            wyappdirs = self.get('wyappdir', ())
            if not wyappdirs and default is not None:
                wyappdirs = default.get('wyappdir', ())
            if wyappdirs: wyappdirs = self.wyappdir = ulistof(wyappdirs)

            wyappnames = self.get('wyappname', ())
            # default application name is the object id
            if not wyappnames: wyappnames = [self.oid]
            if wyappnames: wyappnames = self.wyappname = ulistof(wyappnames)

            if wyappdirs:
                wyapps = []
                for wyappname in wyappnames:
                    found = []
                    # keep only the first existing dir/name combination
                    for wyappdir in wyappdirs:
                        wyapp = path.join(wyappdir, wyappname)
                        if path.exists(wyapp):
                            found.append(wyapp)
                            break
                    if not found:
                        # no existing combination: assume the first directory
                        found = [path.join(wyappdirs[0], wyappname)]
                    wyapps.extend(found)
            else:
                wyapps = wyappnames
            if wyapps: wyapps = self.wyapp = ulistof(wyapps)

        if not self.values:
            self.values = wyapps
+
+################################################################################
+# Actions
+
+def query_rwyinst(*args):
+ """afficher la commande pour déployer avec la commande $1 le wyapp $2 sur
+ l'hôte $3 dans le profil $4
+
+ $1 doit valoir 'rwyinst' ou être un chemin vers ce script
+
+ $2 peut être
+ * un nom de wyapp: tout les wyapps de ce nom sont sélectionnés
+ * un chemin complet: si un wyapp avec le chemin complet est trouvé, ne
+ sélectinner que celui-là, sinon faire comme si on n'avait spécifié que le
+ nom du wyapp
+ * non spécifié: tout les wyapps devant être déployé sur l'hôte sont
+ cherchés
+
+ $3 peut être
+ * un nom d'hôte: tous les hôtes de ce nom sont sélectionés
+ * un nom d'hôte pleinement qualifié: si le nom d'hôte pleinement qualifié
+ est trouvé, ne sélectionner que celui-là, sinon faire comme si on n'avait
+ spécifié que le nom d'hôte
+ * non spécifié: tous les hôtes vers lequel doit être déployé le wyapp sont
+ cherchés
+
+ $4 peut valoir
+ * 'NONE': seuls les déploiements sans profils définis sont sélectionnés.
+ c'est la valeur par défaut.
+ * 'ALL' ou '': ne pas tenir compte du profil lors de la sélection des
+ wyapps et des hôtes
+ * toute autre valeur, e.g prod ou test: seuls les déploiement de ce profil
+ sont sélectionnés
+ Il est possible de spécifier plusieurs profils en les séparant par des
+ virgules. Par exemple, 'prod,NONE' permet de sélectionner les déploiements
+ sans profil ou dans le profil 'prod'
+
+ le wyapp, ou l'hôte, ou les deux sont requis. le profil est facultatif.
+
+ Les valeurs $5..$* sont des définitions d'attributs utilisées pour mettre à
+ jour les faits trouvés. Ces définitions sont utilisés comme argument de
+ wyinst.
+ """
+ rwyinst = args[0] if args[0:1] else None
+ if rwyinst is not None and (rwyinst == 'rwyinst' or rwyinst.endswith('/rwyinst')):
+ verb = 'rwyinst'
+ else:
+ raise ValueError("Le verbe est requis et doit valoir 'rwyinst'")
+ qwyapp = args[1:2] and args[1] or None
+ qhost = args[2:3] and args[2] or None
+ qprofile = args[3] if args[3:4] else 'NONE'
+ supplattrs = args[4:]
+
+ if not qwyapp and not qhost:
+ raise ValueError("Il faut spécifier wyapp et/ou host")
+
+ if not qwyapp:
+ wyapps = None
+ elif cwithpath(qwyapp):
+ qwyapp = path.abspath(qwyapp)
+ wyapps = catalog.find_objects('wyapp', expr=wyapp_matcher(qwyapp))
+ if not wyapps:
+ wyapps = catalog.find_objects('wyapp', expr=wyappname_matcher(qwyapp))
+ else:
+ wyapps = catalog.find_objects('wyapp', expr=wyappname_matcher(qwyapp))
+
+ if not qhost:
+ hosts = None
+ else:
+ if cwithpath(qhost):
+ qhost = path.basename(path.abspath(qhost))
+ if withdomain(qhost):
+ hosts = catalog.find_objects('host', expr=host_matcher(qhost))
+ if not hosts:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+ else:
+ hosts = catalog.find_objects('host', expr=hostname_matcher(qhost))
+
+ if qprofile == '': qprofile = 'ALL'
+ qprofiles = flattenstr([qprofile])
+ if 'ALL' in qprofiles:
+ qprofile = None
+ else:
+ expr = []
+ for qprofile in qprofiles:
+ if qprofile == 'NONE':
+ qprofile = NONE(EXISTS('profile'))
+ else:
+ qprofile = dict(profile=qprofile)
+ expr.append(qprofile)
+ qprofile = ANY(*expr)
+
+ # wyapps et hosts sont spécifiés
+ if wyapps is not None and hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wyapp', tsexpr=dict(oid=[wyapp.oid for wyapp in wyapps]),
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # Seuls les wyapps sont spécifiés: chercher les hôtes
+ elif wyapps is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wyapp', tsexpr=dict(oid=[wyapp.oid for wyapp in wyapps]),
+ ttotype='host',
+ expr=qprofile,
+ )
+
+ # Seuls les hôtes sont spécifiés: chercher les wyapps
+ elif hosts is not None:
+ facts = catalog.find_facts(
+ verb=verb,
+ tsotype='wyapp',
+ ttotype='host', ttexpr=dict(oid=[host.oid for host in hosts]),
+ expr=qprofile,
+ )
+
+ # afficher la commande
+ if supplattrs: parser = Parser()
+ for fact, tsobjects, ttobjects in facts:
+ hs = flattenseq([host.host for host in ttobjects])
+ ws = flattenseq([wyapp.wyapp for wyapp in tsobjects])
+ if supplattrs: parser.parse_attrs(supplattrs, fact)
+ vars = []
+ for name, values in fact.attrs.items():
+ vars.append("%s=%s" % (name, qshell(':'.join(values))))
+ for w in ws:
+ # préférer si possible le chemin fourni par l'utilisateur
+ if withpath(qwyapp): w = qwyapp
+ hs = ':'.join(hs)
+ parts = [xwyinst, '--no-deploydb', '-h', qshell(hs), qshell(w), '--', '-y']
+ if vars: parts.append(' '.join(vars))
+ print ' '.join(parts)
diff --git a/lib/nulib/python/nulib/__init__.py b/lib/nulib/python/nulib/__init__.py
new file mode 100644
index 0000000..9d853e8
--- /dev/null
+++ b/lib/nulib/python/nulib/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
diff --git a/lib/nulib/python/nulib/args.py b/lib/nulib/python/nulib/args.py
new file mode 100644
index 0000000..b2627c5
--- /dev/null
+++ b/lib/nulib/python/nulib/args.py
@@ -0,0 +1,610 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Gestion des arguments de la ligne de commande.
+"""
+
+__all__ = ('split_args', 'join_args', 'build_options', 'get_args',
+ 'Options',
+ )
+
+import sys, re
+from getopt import gnu_getopt
+
+from .base import isstr, isbool, seqof, odict
+from .output import set_verbosity, VERBOSITY_OPTS
+from .input import set_interaction, INTERACTION_OPTS
+from .functions import apply_args
+
# Regular expressions used by the command-line tokenizer below.
RE_SPACES = re.compile(r'[ \t\r\n]+')  # a run of whitespace
RE_QUOTE = re.compile(r'"')  # a double quote
RE_QQUOTE = re.compile(r'\\"')  # an escaped double quote
RE_SPACES_OR_QUOTES = re.compile(r'[ \t\r\n"]+')  # chars requiring quoting
RE_TOKEN = re.compile(r'[^ \t\r\n"]+')  # an unquoted token
RE_QTOKEN = re.compile(r'"((?:\\"|[^"])*)"?')  # a quoted token (closing quote optional)
+
# Tokenizer helpers for split_args().  'pcl' is a one-element list used as
# a mutable holder of the remaining command line: each helper consumes the
# text it matched by reassigning pcl[0].

def has_spaces(cl):
    # True when cl begins with at least one whitespace character
    return RE_SPACES.match(cl) is not None
def skip_spaces(pcl):
    # consume leading whitespace from pcl[0]
    mo = RE_SPACES.match(pcl[0])
    if mo is not None:
        pcl[0] = pcl[0][mo.end(0):]
def get_token(pcl):
    # consume and return an unquoted token, or None when pcl[0] does not
    # start with one
    token = None
    mo = RE_TOKEN.match(pcl[0])
    if mo is not None:
        token, pcl[0] = pcl[0][:mo.end(0)], pcl[0][mo.end(0):]
    return token
def get_qtoken(pcl):
    # consume a quoted token and return its contents (quotes stripped,
    # escapes left untouched), or None when pcl[0] does not start with '"'
    qtoken = None
    mo = RE_QTOKEN.match(pcl[0])
    if mo is not None:
        qtoken, pcl[0] = mo.group(1), pcl[0][mo.end(0):]
    return qtoken
+
def split_args(cl):
    """Split a command-line string into a list of arguments, suitable for
    use with getopt() or get_args().

    Note: arguments may be quoted, but for now only " is supported as a
    quote character, not '.
    XXX add support for ' as a quote character.

    @return: the list of arguments, or None when cl is None
    @rtype: list
    """
    if cl is None: return None

    args = []
    # pcl is a mutable one-element holder of the remaining input, consumed
    # in place by the tokenizer helpers above
    pcl = [cl]
    while pcl[0]:
        if has_spaces(pcl[0]):
            skip_spaces(pcl)
            if not pcl[0]:
                break

        # concatenate adjacent quoted and unquoted chunks into one argument
        arg = ''
        while pcl[0] and not has_spaces(pcl[0]):
            if pcl[0][:1] == '"':
                # unescape \" inside the quoted chunk
                arg = arg + RE_QQUOTE.sub('"', get_qtoken(pcl))
            else:
                arg = arg + get_token(pcl)

        args.append(arg)

    return args
+
def join_args(args):
    """The inverse operation of split_args().

    Arguments containing whitespace or double quotes -- and empty
    arguments, which would otherwise vanish on re-parsing -- are wrapped in
    double quotes, with embedded quotes escaped.  Note that quoted elements
    are rewritten in place inside the args list.

    @return: a single string, or None when args is None
    """
    if args is None: return None
    for i in range(len(args)):
        arg = args[i]
        # bugfix: the original tested 'not args' (the whole list, always
        # non-empty inside this loop) instead of 'not arg', so empty
        # arguments were emitted unquoted and lost by split_args()
        if not arg or RE_SPACES_OR_QUOTES.search(arg) is not None:
            args[i] = '"%s"' % RE_QUOTE.sub(r'\"', arg)
    return ' '.join(args)
+
def build_options(argsdesc):
    """Build an option specification usable with get_args() or getopt().

    argsdesc is a sequence of (shortopts, longopts, desc) terms; shortopts
    contributes to the returned option string, longopts (a string or a
    sequence of strings) to the returned long-option list.

    @return: (options, longoptions)
    @rtype: tuple
    """
    shortspec = ''
    longspec = []
    for desc in (argsdesc if argsdesc is not None else ()):
        shorts = desc[0] if desc[0:1] else None
        if shorts is not None:
            shortspec += shorts
        longs = desc[1] if desc[1:2] else None
        if longs is not None:
            if isstr(longs): longs = (longs,)
            longspec.extend(filter(None, longs))
    return shortspec, longspec
+
# Short options to be handled by set_verbosity() or set_interaction().
M_OPTIONS = {}
# Long options to be handled by set_verbosity() or set_interaction().
M_LONGOPTIONS = {}
# Register every known verbosity/interaction option, initially disabled
# (False); get_args() flips to True the ones not claimed by the caller.
for _opt in VERBOSITY_OPTS:
    if _opt.startswith('--'): M_LONGOPTIONS[_opt] = False
    elif _opt.startswith('-'): M_OPTIONS[_opt] = False
for _opt in INTERACTION_OPTS:
    if _opt.startswith('--'): M_LONGOPTIONS[_opt] = False
    elif _opt.startswith('-'): M_OPTIONS[_opt] = False
# keep the module namespace clean
del _opt
+
# one short option with its optional ':' (takes-a-value) suffix
RE_OPTION = re.compile(r'.:?')
def get_args(args=None, options=None, longoptions=None, **optdescs):
    """Frontend for getopt that also recognises the options of
    set_verbosity() and set_interaction(), updating those levels
    automatically.

    Only the verbosity/interaction options NOT already claimed by the
    caller's options/longoptions are consumed here; everything else is
    returned as with gnu_getopt().

    @return: (optvalues, args), minus the options consumed here
    """
    # NOTE(review): optdescs is accepted but currently unused
    if args is None: args = sys.argv[1:]
    if options is None: options = ''
    longoptions = seqof(longoptions, [])

    options = RE_OPTION.findall(options)
    longoptions = list(longoptions)

    def in_options(opt, options=options):
        """Return True if option opt is mentioned in options, regardless of
        whether it takes an argument there.

        If opt is not mentioned in options, add it.
        opt must be of the form 'o' or 'o:'
        """
        normopt = opt[:1]
        for option in options:
            normoption = option[:1]
            if normopt == normoption: return True
        options.append(opt)
        return False
    def in_longoptions(longopt, longoptions=longoptions):
        """Return True if long option longopt is mentioned in longoptions,
        regardless of whether it takes an argument there.

        If longopt is not mentioned in longoptions, add it.
        longopt must be of the form 'longopt' or 'longopt='
        """
        if longopt[-1:] == '=': normlongopt = longopt[:-1]
        else: normlongopt = longopt
        for longoption in longoptions:
            if longoption[-1:] == '=': normlongoption = longoption[:-1]
            else: normlongoption = longoption
            if normlongopt == normlongoption: return True
        longoptions.append(longopt)
        return False

    # determine which options set_verbosity()/set_interaction() will
    # handle: all those not claimed by the caller
    m_options = M_OPTIONS.copy()
    m_longoptions = M_LONGOPTIONS.copy()

    for m_option in m_options.keys():
        # m_option is of the form '-o'
        if not in_options(m_option[1:]):
            m_options[m_option] = True
    for m_longoption in m_longoptions.keys():
        # m_longoption is of the form '--longopt'
        if not in_longoptions(m_longoption[2:]):
            m_longoptions[m_longoption] = True

    # parse, then apply the options recognised by set_verbosity()
    options = ''.join(options)
    optvalues, args = gnu_getopt(args, options, longoptions)
    for i in range(len(optvalues)):
        opt, _ = optvalues[i]
        set_verbosity_or_interaction = False
        if m_longoptions.get(opt, False): # long options
            set_verbosity_or_interaction = True
        elif m_options.get(opt, False): # options
            set_verbosity_or_interaction = True
        if set_verbosity_or_interaction:
            if opt in VERBOSITY_OPTS:
                set_verbosity(opt)
            elif opt in INTERACTION_OPTS:
                set_interaction(opt)
            # mark the consumed entry for removal below
            optvalues[i] = None

    # return the remaining options, the ones not recognised here
    return filter(None, optvalues), args
+
+################################################################################
+
# sentinel meaning "no value was associated with the option"
_none = object()

RE_PREFIX = re.compile(r'^-*')
RE_SUFFIX = re.compile(r'[:=]$')
RE_STUFF = re.compile(r'[^a-zA-Z0-9]')

def opt2name(opt):
    """Derive a variable name from an option name.

    Leading dashes and one trailing ':' or '=' are stripped, then every
    remaining non-alphanumeric character is replaced by '_'.
    """
    stripped = RE_SUFFIX.sub('', RE_PREFIX.sub('', opt))
    return RE_STUFF.sub('_', stripped)
+
class Option(object):
    """Description of a single command-line option.

    optdef      option definition, e.g. 'o', 'o:', 'long-option' or
                'long-option='
    optname     option name, e.g. 'o' or 'long-option'
    short       is this a short option?
    takes_value does this option take an argument?

    action      action associated with this option.
    name        name of the variable associated with this option.
    """

    # read-only attributes backed by _-prefixed slots; the idiom
    # "_x, x = None, property(...)" sets the class default and the property
    # in a single statement
    _short, short = None, property(lambda self: self._short)
    _optdef, optdef = None, property(lambda self: self._optdef)
    _optname, optname = None, property(lambda self: self._optname)
    _takes_value, takes_value = None, property(lambda self: self._takes_value)

    def __init(self, short, optdef, optname, takes_value):
        # private initializer shared by the parsing branches of __init__()
        self._short = short
        self._optdef = optdef
        self._optname = optname
        self._takes_value = takes_value

    _action, action = None, property(lambda self: self._action)
    _name, name = None, property(lambda self: self._name)

    # accepted option definition patterns: group(1) is the raw definition
    # (possibly with a trailing '=' or ':'), group(2) the bare name
    LONGOPTION_PATTERN = r'(([a-zA-Z0-9$*@!_][a-zA-Z0-9$*@!_-]*)=?)'
    RE_LONGOPTION0 = re.compile(r'--%s$' % LONGOPTION_PATTERN)
    RE_LONGOPTION1 = re.compile(r'%s$' % LONGOPTION_PATTERN)
    OPTION_PATTERN = r'(([a-zA-Z0-9$*@!_]):?)'
    RE_OPTION0 = re.compile(r'-%s$' % OPTION_PATTERN)
    RE_OPTION1 = re.compile(r'%s$' % OPTION_PATTERN)

    def __init__(self, optdef):
        """Parse optdef: '-o'/'--long' (explicit prefix) are tried first; a
        bare definition is tried as a short option, then as a long one.

        takes_value is True when the definition carries a trailing ':' or
        '=' (i.e. group(1) != group(2)).
        """
        if not optdef: raise ValueError("optdef is required")

        mo = self.RE_LONGOPTION0.match(optdef)
        if mo is not None:
            self.__init(False, mo.group(1), mo.group(2), mo.group(1) != mo.group(2))
        else:
            mo = self.RE_OPTION0.match(optdef)
            if mo is not None:
                self.__init(True, mo.group(1), mo.group(2), mo.group(1) != mo.group(2))
            else:
                mo = self.RE_OPTION1.match(optdef)
                if mo is not None:
                    self.__init(True, mo.group(1), mo.group(2), mo.group(1) != mo.group(2))
                else:
                    mo = self.RE_LONGOPTION1.match(optdef)
                    if mo is not None:
                        self.__init(False, mo.group(1), mo.group(2), mo.group(1) != mo.group(2))
                    else:
                        raise ValueError("Invalid option: %s" % optdef)

    def __str__(self):
        # the option as written on the command line, e.g. '-o' or '--long'
        prefix = self._short and '-' or '--'
        return '%s%s' % (prefix, self._optname)
    str = __str__
    opt = property(__str__)

    def __repr__(self):
        # round-trippable form including the takes-value suffix
        option = self.__str__()
        if self._takes_value:
            if self._short: option += ':'
            else: option += '='
        return '%s(%s)' % (self.__class__.__name__, repr(option))
    repr = __repr__

    def same_optdef(self, other):
        # exact definition equality (name AND takes-value suffix)
        return isinstance(other, Option) and self._optdef == other.optdef
    def same_optname(self, other):
        # same name and same takes-value flag, prefix form ignored
        return isinstance(other, Option) and \
            self._optname == other.optname and \
            self._takes_value == other.takes_value
    def __eq__(self, other):
        # NOTE(review): __eq__ without __hash__; fine under python 2 where
        # the default hash is kept, but such instances are unhashable or
        # inconsistently hashed under python 3
        if isstr(other):
            return self.__str__() == other
        elif isinstance(other, Option):
            return self._optdef == other.optdef
        else:
            return False

    def set_action(self, action, name=None):
        # bind the action (and optional variable name) for this option
        self._action = action
        self._name = name
+
class Action(object):
    """An action associated with an option, run whenever the option is seen
    on the command line.

    name    name of the variable associated with the option, or None when it
            must be derived from the option name.
    initial when a value is associated with the option, initial value of that
            variable.

    Subclasses must implement a __call__() method accepting the arguments
    (option[, value[, options]]).
    The method must return False to signal that it could not update the
    value; any other result means success.

    option is an Option instance. value is the value bound to the option, or
    _none when the option takes no argument. options is the Options instance
    currently parsing the arguments.
    """

    # the base class neither names a variable nor provides an initial value
    name = property(lambda self: None)
    initial = property(lambda self: None)

    def __call__(self, option=None, value=_none, options=None):
        pass
+
class Options(object):
    """A class for parsing command-line arguments.

    Its goal is to offer a more flexible solution than the build_options()
    and get_args() functions.

    The constructor and the add_option() method build the set of valid
    options.

    The parse() method then analyses the command line. By default, when no
    action is defined for an option, or when the defined action returns
    False, a variable named after the option is initialized: its value is
    replaced (when the option takes an argument) or incremented by 1 (when
    the option takes no argument).
    """

    class SetValue(Action):
        """Update a variable.

        value   value to force, or _none to use the default value. When the
                option takes an argument, the default value is the value
                given on the command line; otherwise it is an incremented
                counter of the number of times the option appears.
        name    name of the variable to set, or None to derive the variable
                name from the option name.
        initial initial value of the variable.
        """

        _value = None
        _name, name = None, property(lambda self: self._name)
        _initial, initial = None, property(lambda self: self._initial)

        def __init__(self, value=_none, name=None, initial=None):
            self._value = value
            self._name = name
            self._initial = initial

        def __call__(self, option=None, value=_none, options=None):
            # name: the one given to the constructor, or a name derived from
            # the option name
            name = self._name
            if name is None: name = opt2name(option.optname)
            # value: the one given to the constructor, otherwise keep the
            # value received from the command line
            if self._value is not _none: value = self._value

            # update the stored value (increments when value is _none)
            options.update_value(option, value)

    class CallMethod(Action):
        # wrap a plain callable as an Action; apply_args() adapts the
        # argument list to the callable
        _method = None

        def __init__(self, method=None):
            self._method = method

        def __call__(self, option=None, value=None, options=None):
            return apply_args(self._method, option, value, options)

    # parsing mode: '+' to stop at the first non-option argument, '' otherwise
    _parseopt = None

    # list of short options, Option instances
    _soptions = None

    # list of long options, Option instances
    _loptions = None

    # values stored in this object
    _values = None

    # dictionary of the defined options, each mapped to its Option instance
    _options = None

    ############################################################################
    # Constructor

    def __init__(self, *optdescs):
        """Initialize the object with a set of arguments of the form

        (options, longoptions, desc)

        where options is a string with letters of the form 'o' or 'o:',
        longoptions a list of strings of the form 'option' or 'option=', and
        desc an arbitrary string.

        This format exists for compatibility with the build_options()
        function.
        """
        super(Options, self).__init__()
        # go through object.__setattr__: our own __setattr__ consults
        # self._values, which is still None at this point
        object.__setattr__(self, '_parseopt', '')
        object.__setattr__(self, '_soptions', [])
        object.__setattr__(self, '_loptions', [])
        object.__setattr__(self, '_values', {})
        object.__setattr__(self, '_options', {})

        # verbosity/interaction options are always available
        self.add_option(VERBOSITY_OPTS, set_verbosity)
        self.add_option(INTERACTION_OPTS, set_interaction)
        for optdesc in optdescs:
            options = filter(None, optdesc[:2])
            desc = optdesc[2:3] and optdesc[2] or None
            self.add_option(options, None, desc)

    def __option(self, opt):
        """Return the Option instance matching opt (an Option instance, or an
        option string with or without its leading dashes).
        """
        if isinstance(opt, Option): return opt
        if not opt.startswith('-'):
            if len(opt) == 1: opt = '-' + opt
            else: opt = '--' + opt
        option = self._options.get(opt, None)
        if option is None: raise ValueError("Unknown option: %s" % opt)
        return option

    def add_option(self, options=None, action=None, desc=None):
        """Add an option.

        options may be a string of one of the following forms:

        '+' stop parsing at the first non-option (gnu_getopt setting)
        'o', '-o', 'o:', '-o:'
            short option without / with an argument
        'longo', '--longo', 'longo=', '--longo='
            long option without / with an argument

        options may also be a list of such strings; they then all share the
        same action. desc is currently unused.
        """
        default_name = None
        for opt in filter(None, seqof(options, ())):
            # handle the '+' parsing configuration
            if opt.startswith('+'):
                self._parseopt = '+'
                opt = opt[1:]
                if not opt: continue

            # default variable name, derived from the first option
            if default_name is None:
                default_name = opt2name(opt)

            # option
            option = Option(opt)

            # action; note that the computed Action instance is kept in the
            # loop variable and therefore shared by every alias
            if isinstance(action, Action):
                # action already provided
                pass
            elif action is None:
                # no action: update the variable named after the first option
                action = Options.SetValue(name=default_name)
            elif isstr(action):
                # update the variable named after the action
                action = Options.SetValue(name=action)
            elif callable(action):
                # call the action
                action = Options.CallMethod(action)
            else:
                raise ValueError("Unsupported action: %s" % repr(action))

            name = action.name
            if name is None: name = default_name

            option.set_action(action, name)

            # when a previous option with the same name exists, replace it
            self._soptions = filter(lambda soption: not soption.same_optname(option), self._soptions)
            self._loptions = filter(lambda loption: not loption.same_optname(option), self._loptions)

            # new option
            if option.short: self._soptions.append(option)
            else: self._loptions.append(option)
            self._options[option.opt] = option

            # initial value
            # only set the initial value when none exists yet
            if not self.has_value(option):
                self.set_value(option, action.initial)

        return self

    ############################################################################
    # Value management

    def __getitem__(self, key):
        return self._values[key]
    def __setitem__(self, key, value):
        self._values[key] = value
    def __delitem__(self, key):
        del self._values[key]
    def get(self, key, default=None):
        return self._values.get(key, default)
    def __getattr__(self, key, default=_none):
        # stored values are reachable as attributes too
        try:
            if default is _none: return self._values[key]
            else: return self._values.get(key, default)
        except KeyError: raise AttributeError(key)
    def __setattr__(self, key, value):
        # assignment targets a stored value when one exists, otherwise a
        # regular instance attribute
        if self._values.has_key(key): self._values[key] = value
        else: return super(Options, self).__setattr__(key, value)
    def __delattr__(self, key):
        try: del self._values[key]
        except KeyError: raise AttributeError(key)

    def get_value(self, option, default=_none):
        """Return the value associated with the option.
        """
        option = self.__option(option)
        return self.get(option.name, default)
    def has_value(self, option):
        option = self.__option(option)
        return self._values.has_key(option.name)
    def set_value(self, option, value):
        """Set the value associated with the option.
        """
        option = self.__option(option)
        self._values[option.name] = value
        return True

    def update_value(self, option, value=_none):
        # without an explicit value: an option that requires an argument is
        # an error, the others increment their occurrence counter
        option = self.__option(option)
        if value is _none:
            if option.takes_value:
                raise ValueError("Required value")
            else:
                value = self.get_value(option, None)
                if value is None: value = 0
                self.set_value(option, value + 1)
        else:
            self.set_value(option, value)

    ############################################################################
    # Use

    def get_args(self, args=None):
        """Scan the arguments for the valid options. If args==None, use
        sys.argv[1:]

        @return (optvalues, args)

        optvalues is a list of (opt, value) tuples for every option parsed
        by gnu_getopt(). args is the list of the non-option arguments.
        """
        if args is None: args = sys.argv[1:]
        soptions = self._parseopt + ''.join([option.optdef for option in self._soptions])
        loptions = [option.optdef for option in self._loptions]
        optvalues, args = gnu_getopt(args, soptions, loptions)
        return filter(None, optvalues), args

    # names of the variables touched during the last parse() call
    _parsed_names = None

    def parse(self, args=None, optvalues=None):
        """Process the options parsed by get_args(). If optvalues==None,
        first parse the args arguments with get_args().

        @return (roptvalues, args)

        roptvalues is a list of (opt, value) tuples for every option that
        was parsed but could not be handled by this object (its action
        returned False); for those the default variable update is applied.

        args is the list of the non-option arguments.
        """
        self._parsed_names = {}
        if optvalues is None: optvalues, args = self.get_args(args)
        roptvalues = []
        for opt, value in optvalues:
            option = self.__option(opt)
            self._parsed_names[option.name] = True
            if not option.takes_value: value = _none
            # the action signals failure by returning False (exactly), in
            # which case the default update is applied instead
            if option.action(option, value, self) == False:
                roptvalues.append((opt, value))
                self.update_value(option, value)
        return roptvalues, args

    def was_parsed(self, name):
        """Tell whether an option bound to the variable name was mentioned
        on the command line.
        """
        if self._parsed_names is None: return False
        return self._parsed_names.has_key(name)
diff --git a/lib/nulib/python/nulib/base.py b/lib/nulib/python/nulib/base.py
new file mode 100644
index 0000000..075b77f
--- /dev/null
+++ b/lib/nulib/python/nulib/base.py
@@ -0,0 +1,476 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Fonctions de base
+"""
+
+__all__ = ('myself', 'mydir', 'myname',
+ 'Undef',
+ 'updated', 'updatem', 'odict', 'ncdict',
+ 'required', 'nlistf', 'snlistf',
+ 'isnum', 'isflt', 'isbool', 'isbytes', 'isunicode', 'isstr',
+ 'isseq', 'seqof', 'listof', 'firstof',
+ 'all_matches', 'one_match',
+ 'strip_nl', 'norm_nl',
+ 'make_getter', 'make_setter', 'make_deleter', 'make_prop',
+ 'getattrs', 'setattrs', 'delattrs', 'make_delegate',
+ )
+
+import os, sys, re
+from os import path
+from types import IntType, LongType, FloatType, BooleanType
+from types import StringType, UnicodeType, StringTypes
+
# Remove the current directory from sys.path (overridable through the
# optional nutools_config module), so that local files never shadow
# installed modules.
try: from nutools_config import CLEAN_SYSPATH
except ImportError: CLEAN_SYSPATH = True
if CLEAN_SYSPATH:
    def __clean_syspath():
        cwd = os.getcwd()
        # py2: filter() returns a list, so sys.path stays a list
        sys.path = filter(lambda p: p not in ('', '.', cwd), sys.path)
    __clean_syspath()
    del __clean_syspath
+
# location of the current script: absolute path, directory and base name
myself = path.abspath(sys.argv[0])
mydir, myname = path.split(myself)
+
+# Fonctions diverses
+
class Undef(object):
    """Sentinel used to tell 'argument not supplied' apart from None."""
    def __repr__(self):
        return 'Undef'
    def __call__(self):
        """Create a fresh Undef instance; handy for a module that wants its
        own marker value distinct from the global one.
        """
        return type(self)()
# replace the class by its singleton; new markers are obtained by calling it
Undef = Undef()
+
def updated(dict=None, **kw):
    """Return a copy of dict updated with the items of kw; dict itself is
    left untouched. A fresh dictionary is returned when dict is None.
    """
    result = {} if dict is None else dict.copy()
    result.update(kw)
    return result
+
def updatem(dict=None, *dicts):
    """Update dict in place with every dictionary of dicts, then return it.
    A fresh dictionary is created (and returned) when dict is None.
    """
    target = {} if dict is None else dict
    for other in dicts:
        target.update(other)
    return target
+
class odict(dict):
    """Dictionary whose items can also be read/written/deleted as attributes.
    """
    def __init__(self, dict=None, **kw):
        # Pass the initial mapping positionally: splatting it as keyword
        # arguments (the previous implementation) raises TypeError as soon
        # as a key is not a string, whereas dict(mapping) accepts any
        # hashable key. This also accepts an iterable of key/value pairs.
        super(odict, self).__init__(dict or {})
        if kw: self.update(kw)

    def __getattr__(self, name):
        # unknown keys surface as AttributeError, as expected for attributes
        try: return self[name]
        except KeyError: raise AttributeError(name)

    def __setattr__(self, name, value):
        # real instance attributes keep priority over dictionary items
        if name in self.__dict__: self.__dict__[name] = value
        else: self[name] = value

    def __delattr__(self, name):
        try: del self[name]
        except KeyError: raise AttributeError(name)

    def copy(self):
        """Return a shallow copy, preserving the (sub)class."""
        return self.__class__(super(odict, self).copy())
+
_none = object()
class ncdict(odict):
    """Dictionary whose string keys are case-insensitive (stored lowercased).
    """
    def __init__(self, dict=None, **kw):
        # Route every initial item through __setitem__ so that string keys
        # are lowercased at construction time too; the previous
        # implementation stored them verbatim, which made e.g.
        # ncdict({'A': 1})['a'] raise KeyError.
        super(ncdict, self).__init__()
        for key, value in updated(dict, **kw).iteritems():
            self[key] = value

    def __getitem__(self, key):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__getitem__(key)

    def __setitem__(self, key, value):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__setitem__(key, value)

    def __delitem__(self, key):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__delitem__(key)

    def __getattr__(self, key):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__getattr__(key)

    def __setattr__(self, key, value):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__setattr__(key, value)

    def __delattr__(self, key):
        if isstr(key): key = key.lower()
        return super(ncdict, self).__delattr__(key)

    def has_key(self, key):
        if isstr(key): key = key.lower()
        return super(ncdict, self).has_key(key)

    def get(self, key, default=_none):
        if isstr(key): key = key.lower()
        # _none lets us tell "no default given" apart from default=None
        if default is _none: return super(ncdict, self).get(key)
        else: return super(ncdict, self).get(key, default)
+
+def _itemprop(i, name):
+ def getter(self):
+ return self._values[i]
+ def setter(self, value):
+ validator = self.VALIDATORS.get(name, None)
+ if validator is not None: value = validator(value)
+ self._values[i] = value
+ return property(getter, setter)
+
def _fix_module(cls):
    # Best-effort rebinding of the generated class's __module__ from the
    # calling frame's globals (frame depth 1 = the immediate caller of
    # _fix_module, i.e. nlistf/snlistf). NOTE(review): that frame belongs to
    # *this* module, not the end user's module -- confirm the intended depth.
    try: cls.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
    except (AttributeError, ValueError): pass
    return cls
+
def required(validator, name=None):
    """Wrap validator so that a None result raises ValueError stating that
    name (default "The value") is required; any other result is passed on.
    """
    label = "The value" if name is None else name
    def wrapper(value):
        result = validator(value)
        if result is None:
            raise ValueError("%s is required" % label)
        return result
    return wrapper
+
+def nlistf(name, *attrs, **kw):
+ name = str(name)
+ # attributs
+ if len(attrs) == 1 and isstr(attrs[0]):
+ attrs = attrs[0].replace(',', ' ').split()
+ attrs = tuple(map(str, attrs))
+ # validateurs
+ validators = {}
+ for attr, validator in kw.iteritems():
+ if attr not in attrs:
+ raise ValueError("Invalid validator attribute: %s" % attr)
+ validators[attr] = validator
+
+ template = ["""class %(name)s(object):
+ __slots__ = ('_values')
+ ATTRS = None
+ VALIDATORS = None
+ def reset(self):
+ "Reinitialiser toutes les valeurs a None"
+ self._values = [None] * len(self.ATTRS)
+ return self
+ def replace(self, *values, **kw):
+ "Modifier des valeurs specifiques"
+ for i, attr in enumerate(self.ATTRS[:len(values)]): setattr(self, attr, values[i])
+ for attr, value in kw.iteritems(): setattr(self, attr, value)
+ return self
+ def init(self, *values, **kw):
+ "Modifier toutes les valeurs de cet objet. Les valeurs non specifiees recoivent None."
+ return self.reset().replace(*values, **kw)
+ def __init__(self, *values, **kw): self.init(*values, **kw)
+ def inito(self, o):
+ "Modifier toutes les valeurs de cet objet en les prenant depuis les attributs de l'objet o."
+ for attr in self.ATTRS: setattr(self, attr, getattr(o, attr, None))
+ def update(self, d):
+ "Mettre a jour le dictionnaire d avec les valeurs de cet objet"
+ for attr in self.ATTRS: d[attr] = getattr(self, attr)
+ def updateo(self, o):
+ "Mettre a jour les attributs de l'objet o avec les valeurs de cet objet."
+ for attr in self.ATTRS: setattr(o, attr, getattr(self, attr))
+ def asdict(self): return dict(zip(self.ATTRS, self._values))
+ def __repr__(self): return repr(self.asdict())
+ def __len__(self): return len(self._values)
+ def __getitem__(self, key): return self._values.__getitem__(key)
+ def __setitem__(self, key, value): self._values.__setitem__(key, value)
+ def __iter__(self): return self._values.__iter__()
+ def __contains__(self, item): return self._values.__contains__(item)"""]
+ for i, attr in enumerate(attrs):
+ template.append(" %s = itemprop(%i, '%s')" % (attr, i, attr))
+ template = "\n".join(template) % locals()
+ namespace = dict(itemprop=_itemprop)
+ try: exec template in namespace
+ except SyntaxError, e: raise SyntaxError('%s:\n%s' % (e.message, template))
+
+ cls = namespace[name]
+ cls.ATTRS = attrs
+ cls.VALIDATORS = validators
+ return _fix_module(cls)
+
+def snlistf(base, name, *attrs, **kw):
+ name = str(name)
+ # attributs
+ if len(attrs) == 1 and isstr(attrs[0]):
+ attrs = attrs[0].replace(',', ' ').split()
+ attrs = tuple(map(str, attrs))
+ allattrs = base.ATTRS + attrs
+ # validateurs
+ validators = base.VALIDATORS.copy()
+ for attr, validator in kw.iteritems():
+ if attr not in allattrs:
+ raise ValueError("Invalid validator attribute: %s" % attr)
+ validators[attr] = validator
+
+ template = ["""class %(name)s(base):
+ __slots__ = ()
+ ATTRS = None
+ VALIDATORS = None"""]
+ basei = len(base.ATTRS)
+ for i, attr in enumerate(attrs):
+ template.append(" %s = itemprop(%i, '%s')" % (attr, basei + i, attr))
+ template = "\n".join(template) % locals()
+ namespace = dict(base=base, itemprop=_itemprop)
+ try: exec template in namespace
+ except SyntaxError, e: raise SyntaxError('%s:\n%s' % (e.message, template))
+
+ cls = namespace[name]
+ cls.ATTRS = allattrs
+ cls.VALIDATORS = validators
+ return _fix_module(cls)
+
def isnum(i):
    """Test whether i is a numeric value (int or long).

    Uses an exact type() check, so bool (a subclass of int) is deliberately
    NOT considered numeric here.
    """
    return type(i) in (IntType, LongType)
def isflt(f):
    """Test whether f is a floating point value (float); exact type check,
    subclasses are not accepted.
    """
    return type(f) is FloatType
def isbool(b):
    """Test whether b is a boolean value (exact type check).
    """
    return type(b) is BooleanType
def isseq(t):
    """Test whether t is a sequence (a list or a tuple, including
    subclasses); strings are not considered sequences here.
    """
    return isinstance(t, (list, tuple))
def seqof(seq, ifNone=Undef, nocopy=False):
    """Coerce seq to a sequence.

    - a sequence is returned as-is when nocopy, otherwise as a shallow copy
    - None yields ifNone when given, an empty tuple otherwise
    - any other object is wrapped in a 1-tuple
    """
    if seq is None:
        return () if ifNone is Undef else ifNone
    if isseq(seq):
        return seq if nocopy else seq[:]
    return (seq,)
def listof(seq, ifNone=Undef):
    """Coerce seq to a list.

    - None yields ifNone when given, a new empty list otherwise
    - a sequence is converted to a (new) list
    - any other object is wrapped in a 1-element list
    """
    if seq is None:
        return [] if ifNone is Undef else ifNone
    if isseq(seq):
        return list(seq)
    return [seq]
def firstof(seq):
    """Return the first element of seq, seq itself when it is not a
    sequence, or None when the sequence is empty.
    """
    if not isseq(seq): return seq
    # explicit emptiness test: the historical `seq[0:1] and seq[0] or None`
    # idiom wrongly returned None whenever the first element was falsy
    # (0, '', None, ...)
    return seq[0] if seq else None
def isbytes(s):
    """Test whether s is a byte string (py2 str); exact type check.
    """
    return type(s) is StringType
def isunicode(s):
    """Test whether s is a unicode string; exact type check.
    """
    return type(s) is UnicodeType
def isstr(s):
    """Test whether s is a string value (str or unicode); exact type check,
    subclasses are not accepted.
    """
    return type(s) in StringTypes
+
def all_matches(func, seq):
    """Tell whether func matches (returns a true value for) every element of
    seq; seq is first coerced with seqof(), so a scalar tests that single
    value and None yields True.
    """
    return all(func(item) for item in seqof(seq))
+
def one_match(func, seq):
    """Tell whether func matches at least one element of seq; seq is first
    coerced with seqof(), so None yields False.
    """
    return any(func(item) for item in seqof(seq))
+
def strip_nl(s):
    r"""Strip a single trailing line terminator from s: \r\n, \n or \r.
    None is passed through unchanged; at most one terminator is removed.
    """
    if s is None:
        return None
    for ending in ("\r\n", "\n", "\r"):
        if s.endswith(ending):
            return s[:-len(ending)]
    return s
+
# matches any single line terminator: \r\n, \n or \r
RE_NL = re.compile(r'(?:\r?\n|\r)')
def norm_nl(s, nl="\\n"):
    r"""Normalize every line terminator of s (\n, \r\n or \r) to nl.

    NOTE(review): nl is used as an re.sub() replacement *template*, so the
    default "\\n" expands to a real newline, and backslashes in a caller
    supplied nl are interpreted as template escapes -- confirm callers
    expect this.
    """
    if s is None: return None
    else: return RE_NL.sub(nl, s)
+
def make_getter(name):
    """Return an accessor reading the attribute name from its instance."""
    def getter(self):
        return getattr(self, name)
    return getter
def make_setter(name, validator=None):
    """Return an accessor writing the attribute name on its instance,
    optionally filtering the value through validator first.
    """
    if validator is None:
        def setter(self, value):
            setattr(self, name, value)
    else:
        def setter(self, value):
            setattr(self, name, validator(value))
    return setter
+
def make_deleter(name):
    """Return an accessor deleting the attribute name from its instance."""
    def deleter(self):
        delattr(self, name)
    return deleter
+
def make_prop(name, value=None, getter=True, setter=True, deleter=False, validator=None):
    """Return a tuple that eases declaring a property guarded by accessors.

    Usage:

        class C:
            _name, name, get_name, set_name = make_prop('_name', 'Default value')

    getter/setter/deleter may each be True (generate the default accessor),
    a callable, or False/None (omitted). validator filters values on their
    way into the setter.

    @return: (value, property[, getter_func][, setter_func][, deleter_func])
    """
    accessors = {}
    if getter is True:
        getter = make_getter(name)
    if getter:
        accessors['fget'] = getter
    if setter is True:
        setter = make_setter(name, validator)
    elif setter and validator is not None:
        # wrap a caller-supplied setter so the validator still applies
        _setter = setter
        setter = lambda self, value: _setter(self, validator(value))
    if setter:
        accessors['fset'] = setter
    if deleter is True:
        deleter = make_deleter(name)
    if deleter:
        accessors['fdel'] = deleter
    result = [value, property(**accessors)]
    for key in ('fget', 'fset', 'fdel'):
        if key in accessors:
            result.append(accessors[key])
    return tuple(result)
+
def __check_names(names):
    # shared guard for getattrs/setattrs/delattrs: an empty name list would
    # silently address nothing
    if not names: raise AttributeError("The attribute name is required")
+
def getattrs(obj, names, strict=False):
    u"""Given an object obj and a name of the form "attr0.attr1....",
    return the object obtained by evaluating obj.attr0.attr1....

    names may also be supplied pre-split as a sequence of attribute names.

    @param strict: require the whole expression to be traversed. When not
    strict, traversal stops (returning None) as soon as an intermediate
    value is None; when strict, a None intermediate raises AttributeError.
    """
    if not names: return obj
    if not isseq(names): names = names.split(".")
    __check_names(names)
    value = obj
    for i in range(len(names)):
        name = names[i]
        if value is None:
            if strict:
                # build a readable path showing how far traversal got
                if i > 0: path = "obj." + ".".join(names[:i])
                else: path = "None"
                raise AttributeError("%s instance has no value '%s'" % (path, name))
            else: break
        value = getattr(value, name)
    return value
+
def setattrs(obj, names, value):
    u"""Given an object obj and a name of the form "attr0.attr1....",
    perform the equivalent of:

        obj.attr0.attr1.... = value

    Intermediate attributes are resolved strictly (see getattrs).
    """
    if not isseq(names): names = names.split(".")
    __check_names(names)
    target = getattrs(obj, names[:-1], True)
    setattr(target, names[-1], value)
+
def delattrs(obj, names):
    u"""Given an object obj and a name of the form "attr0.attr1....",
    perform the equivalent of:

        del obj.attr0.attr1....

    Intermediate attributes are resolved strictly (see getattrs).
    """
    if not isseq(names): names = names.split(".")
    __check_names(names)
    target = getattrs(obj, names[:-1], True)
    delattr(target, names[-1])
+
def make_delegate(names, getter=True, setter=True, deleter=False):
    """Build a property delegating through the dotted path names (resolved
    with getattrs/setattrs/delattrs). Each accessor may be True (generate
    the default one), a callable, or False/None (omitted).
    """
    if getter is True:
        getter = lambda self: getattrs(self, names, True)
    if setter is True:
        setter = lambda self, value: setattrs(self, names, value)
    if deleter is True:
        deleter = lambda self: delattrs(self, names)

    accessors = {}
    if getter: accessors['fget'] = getter
    if setter: accessors['fset'] = setter
    if deleter: accessors['fdel'] = deleter
    return property(**accessors)
+
def get__all__(module):
    """Return a module's __all__ value, or compute it (every name not
    starting with '_') when the module does not define one.

    @rtype: tuple
    """
    all = getattr(module, '__all__', None)
    if all is not None:
        return tuple(all)
    return tuple(key for key in module.__dict__.keys()
                 if not key.startswith('_'))
+
def import__module__(module_name, globals, locals=None, name=None):
    """Import the module named module_name into globals, binding it as name.

    By default name is the module's base name: the module "a.b.c" is bound
    under the name "c". Returns the one-element list [name] (suitable for
    extending an __all__ list).
    """
    parts = module_name.split('.')
    module = __import__(module_name, globals, locals)
    # __import__ returns the top-level package; walk down to the leaf
    for part in parts[1:]:
        module = getattr(module, part)

    if name is None:
        name = parts[-1]
    globals[name] = module
    return [name]
+
def import__all__(module_name, globals, locals=None, *names):
    """Import into globals the objects of the module named module_name
    listed in names. When names is empty, every public object is imported,
    as with 'from module import *'. Missing names are bound to None.

    Returns the list of bound names (suitable for extending __all__).
    """
    module = __import__(module_name, globals, locals)
    # walk from the top-level package down to the leaf module
    for basename in module_name.split('.')[1:]:
        module = getattr(module, basename)

    if not names: names = get__all__(module)
    bound = []
    for name in names:
        globals[name] = getattr(module, name, None)
        bound.append(name)
    return bound
diff --git a/lib/nulib/python/nulib/config.py b/lib/nulib/python/nulib/config.py
new file mode 100644
index 0000000..101048e
--- /dev/null
+++ b/lib/nulib/python/nulib/config.py
@@ -0,0 +1,876 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Fonctions utilitaires pour lire des fichiers de configuration.
+
+Dans un fichier de configuration, l'on reconnait des lignes de la forme::
+
+ [comment][prefix]varname=value
+
+value peut être placé entre double quotes ou simple quotes. Elle peut s'étendre sur
+plusieurs lignes si elle est mise entre quotes, ou si elle se termine par \
+"""
+
+__all__ = (
+ 'ConfigFile', 'ShConfigFile', 'PListFile',
+ 'ShConfig',
+)
+
+import os, string, re, types, shlex
+from os import path
+
+from .base import odict, make_prop, isseq, seqof, firstof
+from .uio import _s, _u
+from .files import TextFile
+from .formats import unicodeF
+
+####################
+# gestion des commentaires
+
+re_comments = {
+ 'shell': re.compile(r'[ \t]*#+'),
+ 'conf': re.compile(r"[ \t]*;+"),
+ 'C': re.compile(r'[ \t]*//+'),
+ 'visual basic': re.compile(r"[ \t]*'+"),
+ 'wincmd': re.compile(r'[ \t]*(?:r|R)(?:e|E)(?:m|M)'),
+ }
+
+def is_comment(s, type=None):
+    """Return True if s is a comment, i.e. if the line starts with one of
+    the supported comment styles (see re_comments). When type is given,
+    only that comment style is checked.
+    """
+    comment_types = type is None and re_comments.values() or [re_comments[type]]
+    for comment_type in comment_types:
+        if comment_type.match(s): return True
+    return False
+
+####################
+# gestion des fichiers de configuration
+
+_marker = object()
+
+class ConfigFile(TextFile):
+ r"""Un fichier de configuration, que l'on doit lire sous Python, et que l'on
+ doit partager éventuellement avec d'autres langages ou d'autres systèmes
+ d'exploitation. Par exemple, il peut s'agir d'un fichier de configuration
+ sous bash.
+
+ Une valeur non quotée est trimée à droite et à gauche. Une valeur quotée
+ n'est jamais trimée.
+
+ Une valeur quotée peut être suivie d'une valeur non quotée, et les deux sont
+ mergés. Mais une fois que l'on commence à parser une valeur non quotée, plus
+ aucun traitement n'est effectuée, ce qui fait qu'une valeur quotée ne peut
+ pas suivre une valeur non quotée (cf le "andme" ci-dessus).
+
+ Ceci diffère du comportement de parseur plus évolués comme celui de bash. On
+ considère néanmoins que c'est une caractéristique, non un bug. XXX corriger
+ ce problème, ne serait-ce que pour supporter la lecture de fichiers tels que
+ var='value'\''with a quote'
+
+ Tests
+ =====
+
+ >>> from StringIO import StringIO
+ >>> input = StringIO(r'''# comment
+ ... name=value
+ ... name2= value
+ ... name3 = value
+ ... qname="qvalue"
+ ... qname2=" qvalue "
+ ... qname3 = " qvalue "
+ ... qname4="
+ ... multi-line
+ ... qvalue
+ ... "
+ ... fancy="\
+ ... noNL\
+ ... "foryou"andme"
+ ... quote='"'
+ ... quote2="\""
+ ... quote3='\''
+ ... quote4='\\'
+ ... quote5='\\\''
+ ... quote6='\\\'remainder'
+ ... ''')
+ >>> from ulib.base.config import ConfigFile
+ >>> cf = ConfigFile(input)
+ >>> cf.get_string('name')
+ u'value'
+ >>> cf.get_string('name2')
+ u'value'
+ >>> cf.get_string('name3')
+ u'value'
+ >>> cf.get_string('qname')
+ u'qvalue'
+ >>> cf.get_string('qname2')
+ u' qvalue '
+ >>> cf.get_string('qname3')
+ u' qvalue '
+ >>> cf.get_string('qname4')
+ u'\n multi-line\n qvalue\n '
+ >>> cf.get_string('fancy')
+ u'noNLforyouandme'
+ >>> cf.get_string('quote')
+ u'"'
+ >>> cf.get_string('quote2')
+ u'\\"'
+ >>> cf.get_string('quote3')
+ u"\\'"
+ >>> cf.get_string('quote4')
+ u'\\\\'
+ >>> cf.get_string('quote5')
+ u"\\\\\\'"
+ >>> cf.get_string('quote6')
+ u"\\\\\\'remainder"
+
+ """
+
+ # valeurs lues dans le fichier de configuration
+ _items, items = make_prop('_items')[:2]
+ # valeurs par défaut
+ _defaults, defaults = make_prop('_defaults')[:2]
+ # expression régulière identifiant le préfixe des variables
+ _prefix, prefix = make_prop('_prefix', '')[:2]
+ # expression régulière identifiant pour le séparateur entre le nom de la
+ # variable et sa valeur.
+ _equals, equals = make_prop('_equals', r'\s*=')[:2]
+ # faut-il considérer les variables en commentaires?
+ _comment, comment = make_prop('_comment')[:2]
+
+ ############################################################################
+ # interface publique
+
+    def __init__(self, file=None, defaults=None,
+                 prefix=None, equals=None, comment=False,
+                 raise_exception=True, lines=None):
+        """
+        @param prefix a regular expression matching a prefix placed before
+            each variable name. For instance, with prefix=='##@', looking up
+            the variable value searches for the line ##@value.
+        @param comment should commented-out values be considered? If so,
+            everything happens as if the comment marker did not exist.
+        @param defaults a mapping of default values, returned when a
+            variable does not exist in the file.
+        @param lines a Lines or BLines instance used to decode the file
+            content.
+        """
+        super(ConfigFile, self).__init__(file, raise_exception=raise_exception, lines=lines)
+        self._items = {}
+        self._defaults = defaults or {}
+        if prefix is not None: self._prefix = prefix
+        if equals is not None: self._equals = equals
+        self._comment = comment
+
+    def __getitem__(self, name, default=_marker):
+        """Return the value of the variable name, as it was read: a list if
+        it is an array, a string otherwise.
+
+        If the variable is not defined, return default (or the entry from
+        self._defaults when no explicit default is given).
+        """
+        # values are parsed lazily, on first access
+        if not self._items.has_key(name): self._load_value(name)
+        if default is _marker:
+            if not self._items.has_key(name) and self._defaults.has_key(name):
+                return self._defaults[name]
+            return self._items[name]
+        return self._items.get(name, default)
+    get = __getitem__
+
+ def __setitem__(self, name, value):
+ self._items[name] = value
+
+ def __delitem__(self, name):
+ del self._items[name]
+
+ def has_key(self, name):
+ try: self.__getitem__(name)
+ except KeyError: return False
+ else: return True
+
+    def get_string(self, name, default=_marker):
+        """Return the value of the variable name as a scalar. If the value
+        is an array, return its first element, or None when the array is
+        empty.
+        """
+        value = self.__getitem__(name, default)
+        if isseq(value): return firstof(value)
+        else: return value
+
+ def get_lines(self, name, strip=False, default=_marker):
+ """Obtenir une valeur avec get_string(), et la spliter sur le caractère
+ de fin de ligne. Retourner la liste des lignes.
+
+ si strip est vrai, on strip toutes les lignes puis on enlève les
+ lignes vides.
+ """
+ lines = self.get_string(name, default)
+ if not isseq(lines): lines = re.split(r'(?:\r?)\n', lines)
+ if strip: lines = filter(None, map(string.strip, lines))
+ return lines
+
+ def get_paths(self, name, strip=False, default=_marker):
+ """Obtenir une valeur avec get_string(), la splitter sur le caractère
+ 'os.path.pathsep'. Retourner la liste des chemins.
+
+ si strip est vrai, on strip toutes les valeurs puis on enlève les
+ valeurs vide.
+ """
+ paths = self.get_string(name, default)
+ if not isseq(paths): paths = paths.split(path.pathsep)
+ if strip: paths = filter(None, map(string.strip, paths))
+ return paths
+
+ def get_array(self, name, default=_marker):
+ """Obtenir la liste des valeurs de la variable name. Si name est une
+ valeur scalaire, retourner une liste d'un seul élément.
+ """
+ return list(seqof(self.__getitem__(name, default)))
+
+ ############################################################################
+ # partie privée
+
+    RE_ANTISLASHES = re.compile(r'\\+$')
+    def _is_cont(self, value):
+        """Tell whether value must be merged with the next line because it
+        ends with a line-continuation character.
+
+        By default, test whether value ends with an odd number of '\\'
+        (an even number means the backslashes all escape each other).
+        """
+        mo = self.RE_ANTISLASHES.search(value)
+        if mo is None: return False
+        return len(mo.group()) % 2 == 1
+
+ def _strip_cont(self, value):
+ """Enlever le caractère de continuation de ligne de value. On assume que
+ self._is_cont(value) est vrai.
+ """
+ return value[:-1]
+
+ def _merge_cont(self, index, value, sep=''):
+ """Merger value située à la ligne index, et la ligne suivante, en les
+ séparant par sep. On assume que self._is_cont(value) est vrai, et que le
+ caractère de continuation a été enlevé avec self._strip_cont(value)
+
+ Dans la valeur de retour, eof vaut True si la fin de fichier est
+ rencontrée.
+
+ @return (index+1, merged_value, eof)
+ """
+ if index + 1 < len(self.lines):
+ index += 1
+ value = value + sep + self.lines[index]
+ eof = False
+ else:
+ eof = True
+ return index, value, eof
+
+ def _unescape(self, value, quote=''):
+ """Traiter les séquences d'échappement dans une valeur scalaire. Si la
+ valeur était quotée, quote contient la valeur du caractère ("'", '"' ou
+ ''). Par défaut, ne rien faire.
+
+ Cette fonction doit être surchargée en fonction du type de fichier de
+ configuration que l'on lit.
+
+ La valeur quote=='' signifie que la valeur n'était pas quotée, mais il
+ peut quand même y avoir des séquences d'échappement à traiter.
+ """
+ return value
+
+    def _load_value(self, name):
+        """Load the value of a variable from the file.
+
+        XXX make the scan more robust: take care not to read a value that
+        sits inside another value. For example:
+
+          var1="\
+          var2=bad
+          "
+          var2=good
+
+        With the current implementation, asking for var2 yields bad. One way
+        to fix this is to scan the *whole* file, reading the raw value of
+        each variable along the way and caching it; _load_value() would then
+        only parse values found in the cache.
+
+        @return None if the value is not found in the file. Otherwise a
+        scalar value or a sequence is stored, depending on the type of the
+        value.
+        """
+        # group 1 will be tested to see whether the line is commented out
+        re_varname = re.compile(r'(.*)%s%s%s' % (self._prefix, name, self._equals))
+        re_value = re.compile(r'.*%s%s%s(.*)' % (self._prefix, name, self._equals))
+
+        indexes = self.grepi(re_varname)
+        if not indexes: return None
+
+        # first locate the relevant line
+        comment = ''
+        for index in indexes:
+            comment = re_varname.match(self.lines[index]).group(1)
+            if is_comment(comment):
+                # a commented-out value is only accepted when self._comment
+                # is true
+                if not self._comment:
+                    continue
+            # we found the line index
+            break
+        else:
+            # no suitable line was found
+            return
+
+        # then read the value
+        value = re_value.match(self.lines[index]).group(1)
+        value = self._parse_logic(index, value)
+        self._items[name] = value
+
+ def _parse_logic(self, index, value):
+ """Implémenter la logique d'analyse de la valeur d'une variable.
+
+ Il faut reimplémenter cette méthode si on veut modifier le type de
+ valeurs supportées. _parse_scalar() permet d'analyser une valeur simple,
+ _parse_array() permet d'analyser un tableau de valeurs.
+
+ Par défaut, on ne supporte que les valeurs scalaire. Utiliser
+ ShConfigFile pour supporter les tableaux.
+ """
+ value = value.lstrip() # ignorer les espaces avant la valeur
+ return self._parse_scalar(index, value)
+
+ ## valeurs scalaires simples
+
+ RE_SPACES = re.compile(r'\s+')
+ def _parse_scalar(self, index, value):
+ remainder = value
+ value = ''
+ lstrip = None
+ rstrip = None
+ while remainder:
+ mo = self.RE_SPACES.match(remainder)
+ if mo is not None:
+ # ne pas supprimer les espaces entre les valeurs
+ remainder = remainder[mo.end():]
+ value += mo.group()
+ # XXX supporter de spécifier le type de commentaires valides dans ce
+ # fichier de configuration. A cet endroit, il faudrait pouvoir
+ # éliminer les commentaires qui sont sur la ligne. évidemment, ce ne
+ # serait pas forcément approprié suivant la configuration. exemple:
+ # REM pour un fichier cmd n'est valide qu'en début de ligne.
+ elif self._is_quoted(remainder):
+ # valeur quotée. pas de strip
+ if lstrip is None: lstrip = False
+ rstrip = False
+ index, next_value, remainder = self._parse_quoted(index, remainder)
+ value += self._unescape(next_value)
+ else:
+ # valeur non quotée. lstrip si en premier. rstrip si en dernier
+ if lstrip is None: lstrip = True
+ rstrip = True
+ index, next_value, remainder = self._parse_value(index, remainder)
+ value += self._unescape(next_value)
+ if lstrip: value = value.lstrip()
+ if rstrip: value = value.rstrip()
+ return value
+
+ RE_VALUE = re.compile('[^\\s\'"]*')
+ def _parse_value(self, index, value, pattern=None):
+ """Parser une valeur simple non quotée à partir de value (qui se trouve
+ à la position index) et des lignes suivant index si la ligne se termine
+ par '\\'.
+
+ @return index, value, remainder
+ """
+ while self._is_cont(value):
+ value = self._strip_cont(value)
+ index, value, eof = self._merge_cont(index, value)
+ if eof: break
+ if pattern is None: pattern = self.RE_VALUE
+ mo = pattern.match(value)
+ if mo is None:
+ return index, '', value
+ else:
+ remainder = value[mo.end():]
+ value = value[:mo.end()]
+ return index, value, remainder
+
+ ## valeurs scalaires quotées
+
+ def _is_quoted(self, value):
+ """Tester si value est le début d'une valeur quotée. Ignorer les espaces
+ avant la quote.
+ """
+ return value.lstrip()[:1] in ('"', "'")
+
+ def _search_next_quote(self, value, re_quote):
+ """Chercher un match de re_quote dans value, qui ne soit pas précédé par
+ un nombre impair de '\\'.
+ """
+ pos = 0
+ while True:
+ mo = re_quote.search(value, pos)
+ if mo is None: return None
+ if self._is_cont(value[:mo.start()]):
+ # nombre impair de '\\', la quote est mise en échappement
+ pos = mo.end()
+ else:
+ return mo
+
+    RE_QUOTE = re.compile(r'[\'"]')
+    def _parse_quoted(self, index, value):
+        """Parse a quoted value starting at value (located at line index),
+        continuing over the following lines as needed.
+
+        value *must* start with the quote: when _is_quoted(value) is true,
+        any leading whitespace must be stripped before passing value to this
+        method.
+
+        @return index, value, remainder
+        """
+        if self.RE_QUOTE.match(value) is None:
+            raise ValueError("value must start with a quote, got %s" % repr(_s(value)))
+        quote, value = value[:1], value[1:]
+        re_quote = re.compile(quote)
+        mo = self._search_next_quote(value, re_quote)
+        while mo is None:
+            if self._is_cont(value):
+                # escaped newline: merge with the next line, no separator
+                value = self._strip_cont(value)
+                index, value, eof = self._merge_cont(index, value)
+            else:
+                # quote still open: merge and keep the newline in the value
+                index, value, eof = self._merge_cont(index, value, self.nl)
+            mo = self._search_next_quote(value, re_quote)
+            if eof: break
+        if mo is None:
+            # quoted but badly terminated value: pretend we saw nothing
+            return index, value, ''
+        else:
+            remainder = value[mo.end():]
+            value = value[:mo.start()]
+            return index, value, remainder
+
+ ## tableaux
+
+ def _is_array(self, value):
+ """Tester si value est le début d'un tableau. Ignorer les espaces avant
+ le tableau.
+ """
+ return False
+
+ def _parse_array(self, index, value):
+ """Parser un tableau à partir de value (qui se trouve à la position
+ index) et des lignes suivant index.
+
+ value *doit* commencer par le tableau. si _is_array(value) est vrai, il
+ faut enlever les espaces éventuels au début de value avant de la passer
+ à cette méthode.
+ """
+ return []
+
+class ShConfigFile(ConfigFile):
+ r"""Un fichier de configuration qui est susceptible d'être lu aussi par bash
+ (ou tout autre shell sh-like). On supporte l'évaluation de variables, et
+ certaines séquences d'échappement pour des valeurs quotées.
+
+ Il y a certaines limitations: lors de la lecture des valeurs des variables,
+ les caractères sont traduits suivant la correspondance suivante:
+
+ \ en fin de ligne: continuer sur la ligne suivante
+ \" "
+ \\ \
+ \$ $
+
+ La séquence \` n'est pas traduite. En effet, pour que cela aie un sens, il
+ faudrait que l'on traduise aussi `cmd`
+
+ De plus, on ne supporte que les variables de la forme $var et ${var}
+
+ Tests
+ =====
+
+ >>> from StringIO import StringIO
+ >>> input = StringIO(r'''# comment
+ ... var1=value
+ ... var2="value"
+ ... var3='value'
+ ... var4=(value1 "value2" 'value3')
+ ... var5=(
+ ... value1
+ ... "value2\
+ ... " 'value3'
+ ... )
+ ... var6=()
+ ... var7=( )
+ ... var8=(
+ ... )
+ ... ''')
+ >>> from ulib.base.config import ShConfigFile
+ >>> cf = ShConfigFile(input)
+ >>> cf.get_string('var1')
+ u'value'
+ >>> cf.get_string('var2')
+ u'value'
+ >>> cf.get_string('var3')
+ u'value'
+ >>> cf.get_string('var4')
+ u'value1'
+ >>> cf.get_array('var4')
+ [u'value1', u'value2', u'value3']
+ >>> cf.get_array('var5')
+ [u'value1', u'value2', u'value3']
+ >>> [cf.get_array(name) for name in ('var6', 'var7', 'var8')]
+ [[], [], []]
+ >>> cf.get_array('var1')
+ [u'value']
+ >>> cf.get_string('var4')
+ u'value1'
+ >>> cf.get_string('var6') is None
+ True
+ """
+
+ RE_VAR = re.compile(r'\$(?:\{([^}]+)\}|(\w+))')
+ TRANS_MAP = {r'\"': '"', r'\\': '\\', r'\$': '$'}
+
+ def __convert(self, value):
+ # XXX rendre la conversion plus robuste: veiller à l'ordre ('\\\\' en
+ # dernier...), et ne faire la conversion que pour un nombre impaire de
+ # '\\'.
+ for s, r in self.TRANS_MAP.items():
+ value = value.replace(s, r)
+ return value
+
+    def _unescape(self, value, quote=''):
+        """Unescape a quoted value following bash rules.
+        quote may be "'", '"' or ''
+        """
+        # no translation at all between single quotes
+        if quote == "'": return value
+        # otherwise apply the standard rules; in particular, replace $var
+        # and ${var} with self._items["var"] or os.environ["var"]
+        splited = self.RE_VAR.split(value)
+        value = self.__convert(splited[0])
+        splited = splited[1:]
+        # RE_VAR has two groups, so re.split yields chunks of three:
+        # (${var} capture, $var capture, literal text that follows)
+        while splited:
+            var0 = splited[0]
+            var1 = splited[1]
+            text = splited[2]
+            splited = splited[3:]
+            var = var0 or var1
+            if self.has_key(var): value = value + self.get_string(var)
+            else: value = value + os.environ.get(var, "")
+            value = value + self.__convert(text)
+        return value
+
+ def _parse_logic(self, index, value):
+ value = value.lstrip() # ignorer les espaces avant la valeur
+ if self._is_array(value): return self._parse_array(index, value)
+ else: return self._parse_scalar(index, value)
+
+ ## tableaux
+
+ def _is_array(self, value):
+ """Tester si value est le début d'un tableau.
+ """
+ return value.strip()[:1] == '('
+
+ RE_ARRAY_VALUE = re.compile('[^\\s\'")]*')
+ def _parse_next_scalar(self, index, value):
+ """Parser la prochaine valeur scalaire
+ XXX à faire
+ @return index, value, remainder
+ """
+ remainder = value
+ value = ''
+ lstrip = None
+ rstrip = None
+ while remainder:
+ if self.RE_SPACES.match(remainder) is not None:
+ # les valeurs sont séparées par des espaces
+ break
+ # XXX cf ConfigFile._parse_scalar pour la gestion des commentaires
+ elif self.RE_EOA.match(remainder) is not None:
+ # fin de tableau
+ break
+ elif self._is_quoted(remainder):
+ # valeur quotée. pas de strip
+ if lstrip is None: lstrip = False
+ rstrip = False
+ index, next_value, remainder = self._parse_quoted(index, remainder)
+ value += self._unescape(next_value)
+ else:
+ # valeur non quotée. lstrip si en premier. rstrip si en dernier
+ if lstrip is None: lstrip = True
+ rstrip = True
+ index, next_value, remainder = self._parse_value(index, remainder, self.RE_ARRAY_VALUE)
+ value += self._unescape(next_value)
+ if lstrip: value = value.lstrip()
+ if rstrip: value = value.rstrip()
+ return index, value, remainder
+
+    RE_SOA = re.compile(r'\(')
+    RE_EOA = re.compile(r'\)')
+    def _parse_array(self, index, value):
+        """Parse an array starting at value (located at line index),
+        continuing over the following lines as needed.
+
+        @return values — note: unlike the scalar parsers, only the list of
+        values is returned, not an (index, values, remainder) tuple.
+        """
+        if self.RE_SOA.match(value) is None:
+            raise ValueError("value must start with '(', got %s" % repr(_s(value)))
+        remainder = value[1:]
+        values = []
+        eoa = False # end of array
+        while True:
+            if not remainder:
+                # we have not reached the end of the array yet; keep
+                # reading lines until we find what we need
+                index, remainder, eof = self._merge_cont(index, remainder)
+                if eof: break
+            # skip whitespace between values
+            mo = self.RE_SPACES.match(remainder)
+            if mo is not None:
+                remainder = remainder[mo.end():]
+                continue
+            # check whether we reached the end of the array
+            if self.RE_EOA.match(remainder) is not None:
+                remainder = remainder[1:]
+                eoa = True
+                break
+            # parse one scalar value
+            index, next_value, remainder = self._parse_next_scalar(index, remainder)
+            values.append(next_value)
+        # here eoa is True if the array was properly terminated;
+        # otherwise we pretend we saw nothing wrong.
+        return values
+
+_debug = False
+def _print_debug(s):
+    # debugging helper: print s only when the module-level _debug flag is on
+    if _debug: print s
+
+class PListFile(TextFile):
+ def readlines(self, raise_exception=True, close=True):
+ TextFile.readlines(self, raise_exception, close)
+
+ self.items = None
+ self.list = None
+ self.value = None
+
+ if self.is_valid():
+ if self.lines and self.lines[0][:5] == ' 11:
+ month -= 12
+ year += 1
+ while month < 0:
+ month += 12
+ year -= 1
+ month += 1
+ return year, month
+MONTHDAYS = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
+def _monthdays(year, month, offset=0):
+    """Return the number of days of month+offset, normalizing the
+    (year, month) pair with _fix_month and accounting for leap-year
+    February (via _isleap, defined elsewhere in this module).
+    """
+    year, month = _fix_month(year, month + offset)
+    if month == 2 and _isleap(year): leapday = 1
+    else: leapday = 0
+    return MONTHDAYS[month] + leapday
+def _fix_day(year, month, day):
+    """Normalize an out-of-range day, carrying over into month and year.
+    Assumes month has already been normalized (by _fix_month).
+    """
+    # work with a 0-based day to simplify the carry arithmetic
+    day -= 1
+    while day > _monthdays(year, month) - 1:
+        day -= _monthdays(year, month)
+        year, month = _fix_month(year, month + 1)
+    while day < 0:
+        year, month = _fix_month(year, month - 1)
+        day += _monthdays(year, month)
+    day += 1
+    return year, month, day
+def _fix_date(day, month, year):
+    """Normalize a full date; note the argument and result order is
+    (day, month, year), the reverse of _fix_month/_fix_day.
+    """
+    year, month = _fix_month(year, month)
+    year, month, day = _fix_day(year, month, day)
+    return day, month, year
+
+MONTHNAMES = [u"Janvier", u"Février", u"Mars", u"Avril", u"Mai", u"Juin",
+ u"Juillet", u"Août", u"Septembre", u"Octobre", u"Novembre", u"Décembre",
+ ]
+MONTHNAMES3 = [u"Jan", u"Fév", u"Mar", u"Avr", u"Mai", u"Jun",
+ u"Jul", u"Aoû", u"Sep", u"Oct", u"Nov", u"Déc",
+ ]
+MONTHNAMES1 = [u"J", u"F", u"M", u"A", u"M", u"J",
+ u"J", u"A", u"S", u"O", u"N", u"D",
+ ]
+
+class Date(object):
+ """Un wrapper pour 'datetime.date'.
+
+ Attention! Cet objet est mutable, il ne faut donc pas l'utiliser comme clé
+ dans un dictionnaire.
+ """
+ _d = None
+
+ def __init__(self, day=None, month=None, year=None, t=None):
+ """Initialiser l'objet.
+
+ Dans l'ordre, les champs considérés sont:
+ - day si c'est une instance de Date ou datetime.date
+ - t le nombre de secondes depuis l'epoch, comme retourné par
+ time.time(). Cette valeur est fusionnée avec les valeurs numériques
+ day, month, year.
+ """
+ if day is not None and not isnum(day) and month is None and year is None and t is None:
+ if isinstance(day, pydatetime): day = day.date()
+ if isinstance(day, pydate): self._d = day
+ elif isinstance(day, Date): self._d = day._d
+ if self._d is None:
+ if t is None: t = time()
+ y, m, d = localtime(t)[:3]
+ if year is None: year = y
+ if month is None: month = m
+ if day is None: day = d
+ day, month, year = _fix_date(day, month, year)
+ self._d = pydate(year, month, day)
+
+ date = property(lambda self: self._d)
+ year = property(lambda self: self._d.year)
+ month = property(lambda self: self._d.month)
+ day = property(lambda self: self._d.day)
+
+    # number of days of this date's month
+    # NOTE(review): uses the static MONTHDAYS table, so February of a leap
+    # year is reported as 28 here, unlike _monthdays() — confirm intended
+    monthdays = property(lambda self: MONTHDAYS[self.month])
+
+ def weekday(self):
+ """Retourner le jour de la semaine, de 0 (lundi) à 6 (dimanche)
+ """
+ return self._d.weekday()
+ def isoweekday(self):
+ """Retourner le jour de la semaine, de 1 (lundi) à 7 (dimanche)
+ """
+ return self._d.isoweekday()
+ def is_today(self):
+ """Tester si cette date est le jour d'aujourd'hui
+ """
+ now = self.__class__()._d
+ date = self._d
+ return now.year == date.year and now.month == date.month and now.day == date.day
+
+    def calday(self, show_month=False, show_year=False):
+        """Return 'day' if day != 1 and not show_month and not show_year,
+        'day/month' if month != 1 and not show_year,
+        'day/month/year' otherwise.
+        """
+        day, month, year = self.day, self.month, self.year
+        if day != 1 and not show_month and not show_year: return _u(day)
+        elif month != 1 and not show_year: return u"%i/%i" % (day, month)
+        else: return u"%i/%i/%i" % (day, month, year)
+
+ def monthname(self, format=None):
+ """Obtenir le nom du mois.
+ Si format est dans (1, 't', 'tiny'), retourner le nom sur 1 lettre.
+ Si format est dans (3, 's', 'small'), retourner le nom sur 3 lettres.
+ Sinon, retourner le nom complet.
+ """
+ if format in (1, 't', 'tiny'): names = MONTHNAMES1
+ elif format in (3, 's', 'small'): names = MONTHNAMES3
+ else: names = MONTHNAMES
+ return names[self.month - 1]
+
+ __monthname1 = lambda self: self.monthname(1)
+ __monthname3 = lambda self: self.monthname(3)
+ FORMAT_MAP = {'%Y': '%(y)04i', '%m': '%(m)02i', '%d': '%(d)02i',
+ '%H': '%(H)02i', '%M': '%(M)02i', '%S': '%(S)02i',
+ '%1m': __monthname1, '%3m': __monthname3, '%fm': monthname,
+ '%C': calday,
+ }
+ def format(self, format=None):
+ """Formater la date pour affichage.
+
+ Les champs valides sont %Y, %m, %d qui correspondent à la date de cet
+ objet, %H, %M, %S qui valent toujours 0, et %1m, %3m, %fm, %C, qui
+ correspondent respectivement à self.monthname(1), self.monthname(3),
+ self.monthname(), self.calday().
+ """
+ if format is None: format = FR_DATEF
+ y, m, d, H, M, S = self.year, self.month, self.day, 0, 0, 0
+ for fr, to in self.FORMAT_MAP.items():
+ if callable(to): to = to(self)
+ format = format.replace(fr, to)
+ return format % locals()
+
+ def set(self, day=None, month=None, year=None):
+ kw = {}
+ for name, value in [('day', day), ('month', month), ('year', year)]:
+ if value is not None: kw[name] = value
+ self._d = self._d.replace(**kw)
+ return self
+
+ def set_weekday(self, weekday=0):
+ if self.weekday() != weekday:
+ day = self.day + weekday - self.weekday()
+ self.set(*_fix_date(day, self.month, self.year))
+ return self
+
+ def set_isoweekday(self, isoweekday=1):
+ if self.isoweekday() != isoweekday:
+ day = self.day + isoweekday - self.isoweekday()
+ self.set(*_fix_date(day, self.month, self.year))
+ return self
+
+ def __repr__(self):
+ return '%s(%i, %i, %i)' % (self.__class__.__name__, self.year, self.month, self.day)
+ def __str__(self):
+ return '%02i/%02i/%04i' % (self.day, self.month, self.year)
+ def __unicode__(self):
+ return u'%02i/%02i/%04i' % (self.day, self.month, self.year)
+
+ def __eq__(self, other): return self._d == self._date(other, False)
+ def __ne__(self, other): return self._d != self._date(other, False)
+ def __lt__(self, other):
+ if other is None: return False
+ else: return self._d < self._date(other)
+ def __le__(self, other):
+ if other is None: return False
+ else: return self._d <= self._date(other)
+ def __gt__(self, other):
+ if other is None: return True
+ else: return self._d > self._date(other)
+ def __ge__(self, other):
+ if other is None: return True
+ else: return self._d >= self._date(other)
+ def __cmp__(self, other):
+ if other is None: return 1
+ else: return cmp(self._d, self._date(other))
+ def __hash__(self): return hash(self._d)
+
+ def _date(self, d, required=True):
+ """Retourner l'instance de datetime.date correspondant à l'objet d.
+ """
+ if isinstance(d, pydate): return d
+ elif isinstance(d, pydatetime): return d.date()
+ elif isinstance(d, Date): return d._d
+ elif required: raise ValueError("Expected datetime.date or Date instance, got %s" % repr(d))
+ else: return None
+
+ def _delta(self, td):
+ """Retourner l'instance de datetime.timedelta correspondant à l'objet td
+ """
+ if isinstance(td, timedelta): return td
+ elif isnum(td): return timedelta(td)
+ else: raise ValueError("Expected number or datetime.delta instance got %s" % repr(td))
+
+ def _new(cls, d=None, t=None):
+ """Constructeur. d est une instance de Date ou datetime.date. t est un
+ nombre de secondes depuis l'epoch.
+ """
+ if d is not None:
+ if isinstance(d, pydate): return cls(d.day, d.month, d.year)
+ elif isinstance(d, pydatetime): return cls(d.day, d.month, d.year)
+ elif isinstance(d, Date): return cls(d.day, d.month, d.year)
+ else: raise ValueError("Expected datetime.date or Date instance, got %s" % repr(d))
+ elif t is not None: return cls(t=t)
+ else: return cls()
+ _new = classmethod(_new)
+
+ def copy(self):
+ """Retourner une nouvelle instance, copie de cet objet
+ """
+ return self._new(self._d)
+
+ def replace(self, day=None, month=None, year=None):
+ """Retourner une nouvelle instance avec les champs spécifiés modifiés.
+ """
+ kw = {}
+ for name, value in [('day', day), ('month', month), ('year', year)]:
+ if value is not None: kw[name] = value
+ return self._new(self._d.replace(**kw))
+
+ def __add__(self, other): return self._new(self._d + self._delta(other))
+ __radd__ = __add__
+ def add(self, days=1): return self + days
+
+ def __sub__(self, other): return self._new(self._d - self._delta(other))
+ __rsub__ = __sub__
+ def sub(self, days=1): return self - days
+
+ def diff(self, other):
+ """Retourner le nombre de jours de différences entre cette date et other
+ """
+ delta = self._d - self._date(other)
+ return delta.days
+
+    def __fix_weekday(self, date):
+        """If date falls after Thursday, return the start of the following
+        week, otherwise return the start of the current week.
+        """
+        date = date.copy()
+        if date.weekday() > 3:
+            date = date.set_weekday(0)
+            date += 7
+        else:
+            date.set_weekday(0)
+        return date
+
+    def get_monthweeks(self, complete=True, only_debut=None):
+        """Return a list of (start, end) Date pairs for the weeks of this
+        object's month.
+
+        If only_debut==True, return only the list of start dates instead of
+        (start, end) tuples. By default only_debut==complete.
+
+        If complete==True, only whole weeks are returned: the dates at the
+        beginning and the end of the month are adjusted to include days of
+        the previous and the next month whenever at least 4 days belong to
+        the current month.
+
+        Otherwise, the first and last weeks may be truncated and contain
+        only days of this month.
+        """
+        if only_debut is None: only_debut = complete
+
+        first = self.copy().set(1)
+        monthdays = first.monthdays
+        last = first + monthdays
+        weeks = []
+        if complete:
+            first = self.__fix_weekday(first)
+            last = self.__fix_weekday(last)
+            debut = first
+            while debut < last:
+                fin = debut + 6
+                if only_debut: weeks.append(debut)
+                else: weeks.append((debut, fin))
+                debut = fin + 1
+        else:
+            last -= 1
+            debut = first
+            while debut <= last:
+                fin = debut.copy().set_weekday(6)
+                if fin > last: fin = last
+                if only_debut: weeks.append(debut)
+                else: weeks.append((debut, fin))
+                debut = fin + 1
+        return weeks
+
+def isdate(d):
+    """Test whether d is a Date instance.
+    """
+    return isinstance(d, Date)
+def isanydate(d):
+    """Test whether d is a Date, datetime.date or datetime.datetime instance.
+    """
+    return isinstance(d, Date) or isinstance(d, pydate) or isinstance(d, pydatetime)
+
+RE_DATE_FR = re.compile(r'(\d+)(?:/(\d+)(?:/(\d+))?)?$')
+RE_DATE_ISO = re.compile(r'(\d+)-(\d+)-(\d+)$')
+def parse_date(s):
+    """Parse a string and return a Date instance.
+
+    Two formats are accepted: French d[/m[/y]] and ISO y-m-d. Components
+    missing from the French form are left as None and defaulted by
+    Date.__init__; the year is normalized by _fix_year (defined elsewhere
+    in this module).
+    """
+    mof = RE_DATE_FR.match(s)
+    moi = RE_DATE_ISO.match(s)
+    if mof is not None:
+        year = mof.group(3)
+        month = mof.group(2)
+        day = mof.group(1)
+    elif moi is not None:
+        year = moi.group(1)
+        month = moi.group(2)
+        day = moi.group(3)
+    else:
+        raise ValueError("Invalid date format: %s" % _s(s))
+    if year is not None: year = _fix_year(int(year))
+    if month is not None: month = int(month)
+    if day is not None: day = int(day)
+    return Date(day, month, year)
+
+def ensure_date(d):
+    """Return a Date instance, or None when d is None.
+
+    d may be an instance of datetime.date, datetime.datetime or Date, or a
+    string in one of the formats accepted by parse_date().
+    """
+    if d is None: return None
+    elif isinstance(d, Date): return d
+    elif isinstance(d, pydate): return Date._new(d)
+    elif isinstance(d, pydatetime): return Date._new(d)
+    if not isstr(d): d = _s(d)
+    return parse_date(d)
+
+def _tzname():
+ tz = time_mod.timezone
+ if tz > 0: s = "-"
+ else: s = "+"
+ tz = abs(tz) / 60
+ h = tz / 60
+ m = tz % 60
+ return "%s%02i%02i" % (s, h, m)
+
def rfc2822(time=None, gmt=True):
    """Return the date formatted per RFC 2822.

    time is a timestamp in time.time() format; it defaults to the
    current time.  With gmt=True the date is expressed in UTC ("+0000"),
    otherwise in local time with the local timezone suffix.
    """
    if time is None:
        time = time_mod.time()
    if gmt:
        stamp, zone = gmtime(time), "+0000"
    else:
        stamp, zone = localtime(time), _tzname()
    return "%s %s" % (asctime(stamp), zone)
+
class _DateSpecConstants:
    """Constants shared by DateSpec and its nested classes.

    The regular expressions below are composed from sub-expressions; for
    each sub-expression X, X_COUNT records how many capture groups it
    contributes, and the *_POS / *_OFF constants compute the absolute
    index of every group within the final composed pattern RE_SPEC.
    """

    # Constraint ("!wN" / "!nN")
    C = r'(?:!(w|n)(\d+))'
    C_COUNT = 2 # number of groups in regex C
    C_OP = 0 # relative group index of the OP value
    C_WD = 1 # relative group index of the WEEKDAY value

    # Specification
    I = r'(\d+)'
    I_COUNT = 1 # number of groups in regex I
    I_VALUE = 0 # relative group index of the VALUE value

    R = r'(?:(\d+)(?:\s*-\s*(\d+))?)' # Range
    R_COUNT = 2 # number of groups in regex R
    R_FROM = 0 # relative group index of the FROM value
    R_TO = 1 # relative group index of the TO value

    AOR = r'(?:(\*)|%s)' % R # AnyOrRange
    AOR_COUNT = 1 + R_COUNT # number of groups in regex AOR
    AOR_R_POS = 1 # position of the first group of regex R within AOR
    AOR_ANY = 0
    AOR_FROM = AOR_R_POS + R_FROM # relative group index of the FROM value
    AOR_TO = AOR_R_POS + R_TO # relative group index of the TO value

    S = r'(?:\+%s|w%s|%s)(?:\s*/\s*%s(?:\s*/\s*%s)?)?' % (I, R, AOR, AOR, AOR)
    S_COUNT = I_COUNT + R_COUNT + 3 * AOR_COUNT # number of groups in regex S
    S_I_POS = 0 # position of the first group of regex I within S
    S_R_POS = S_I_POS + I_COUNT # position of the first group of regex R within S
    S_DAOR_POS = S_R_POS + R_COUNT # position of the first group of the day AOR within S
    S_MAOR_POS = S_DAOR_POS + AOR_COUNT # position of the first group of the month AOR within S
    S_YAOR_POS = S_MAOR_POS + AOR_COUNT # position of the first group of the year AOR within S
    S_OFFSET = S_I_POS + I_VALUE # relative group index of the OFFSET value
    S_WD_FROM = S_R_POS + R_FROM # relative group index of the FROM value of WD
    S_WD_TO = S_R_POS + R_TO # relative group index of the TO value of WD
    S_D_ANY = S_DAOR_POS + AOR_ANY # relative group index of the ANY value of D
    S_D_FROM = S_DAOR_POS + AOR_FROM # relative group index of the FROM value of D
    S_D_TO = S_DAOR_POS + AOR_TO # relative group index of the TO value of D
    S_M_ANY = S_MAOR_POS + AOR_ANY # relative group index of the ANY value of M
    S_M_FROM = S_MAOR_POS + AOR_FROM # relative group index of the FROM value of M
    S_M_TO = S_MAOR_POS + AOR_TO # relative group index of the TO value of M
    S_Y_ANY = S_YAOR_POS + AOR_ANY # relative group index of the ANY value of Y
    S_Y_FROM = S_YAOR_POS + AOR_FROM # relative group index of the FROM value of Y
    S_Y_TO = S_YAOR_POS + AOR_TO # relative group index of the TO value of Y

    RE_SPEC = re.compile(r'(?:(?:%s)|(?:%s))$' % (C, S))
    # offsets of the group positions within regex RE_SPEC
    SPEC_C_POS = 0
    SPEC_S_POS = SPEC_C_POS + C_COUNT
    # group positions within regex RE_SPEC (match group numbers are 1-based)
    SPEC_C_OFF = 1 + SPEC_C_POS
    CONS_OP = SPEC_C_OFF + C_OP
    CONS_WD = SPEC_C_OFF + C_WD
    SPEC_S_OFF = 1 + SPEC_S_POS
    SPEC_OFFSET = SPEC_S_OFF + S_OFFSET
    SPEC_WD_FROM = SPEC_S_OFF + S_WD_FROM
    SPEC_WD_TO = SPEC_S_OFF + S_WD_TO
    SPEC_D_ANY = SPEC_S_OFF + S_D_ANY
    SPEC_D_FROM = SPEC_S_OFF + S_D_FROM
    SPEC_D_TO = SPEC_S_OFF + S_D_TO
    SPEC_M_ANY = SPEC_S_OFF + S_M_ANY
    SPEC_M_FROM = SPEC_S_OFF + S_M_FROM
    SPEC_M_TO = SPEC_S_OFF + S_M_TO
    SPEC_Y_ANY = SPEC_S_OFF + S_Y_ANY
    SPEC_Y_FROM = SPEC_S_OFF + S_Y_FROM
    SPEC_Y_TO = SPEC_S_OFF + S_Y_TO

    def _range(f, t=None):
        # Build an inclusive (from, to) pair; bounds given in reverse
        # order are swapped.
        f = int(f)
        if t is None: t = f
        else: t = int(t)
        if t < f: t, f = f, t
        return (f, t)
    _range = staticmethod(_range) # pre-decorator (Python < 2.4) style
    def _isw(vs): return vs == '*'
    _isw = staticmethod(_isw)
    def _isr(vs): return isseq(vs)
    _isr = staticmethod(_isr)
    def _matches(cls, vs, v):
        # vs is either the '*' wildcard or an inclusive (from, to) pair
        if cls._isw(vs): return True
        elif cls._isr(vs): return v >= vs[0] and v <= vs[1]
        else: raise ValueError("Invalid format: %s" % _s(vs))
    _matches = classmethod(_matches)
    def _tostr(cls, vs):
        # NB: "%i-%i" % vs requires vs to be a tuple
        if cls._isw(vs):
            return "*"
        elif cls._isr(vs):
            if vs[0] == vs[1]: return "%i" % vs[0]
            else: return "%i-%i" % vs
        else: raise ValueError("Invalid format: %s" % _s(vs))
    _tostr = classmethod(_tostr)
    def _check_range(cls, name, vs, min, max):
        # Validate that both bounds of vs lie in [min, max]; None means
        # unbounded on that side.
        if (min is not None and (vs[0] < min or vs[1] < min)) or \
           (max is not None and (vs[0] > max or vs[1] > max)):
            if min is None: min = u"-INF"
            else: min = str(min)
            if max is None: max = u"+INF"
            else: max = str(max)
            raise ValueError("%s values must be in the [%s, %s] range, got %s" % (name, min, max, cls._tostr(vs)))
    _check_range = classmethod(_check_range)
    def _check_value(cls, name, v, min, max):
        # Validate that the single value v lies in [min, max]; None means
        # unbounded on that side.
        if (min is not None and v < min) or (max is not None and v > max):
            if min is None: min = u"-INF"
            else: min = str(min)
            if max is None: max = u"+INF"
            else: max = str(max)
            raise ValueError("%s value must be in the [%s, %s] range, got %i" % (name, min, max, v))
    _check_value = classmethod(_check_value)
+
class DateSpec(_DateSpecConstants):
    """A date specification of the form D[/M[/Y]], or a date-constraint
    specification of the form !W.

    - D can take one of the following forms:
      - days of the month: *, DAY or FROM-TO.
      - days of the week: "w"WEEKDAY or "w"FROM-TO,
        with 1=Monday, ..., 7=Sunday
      - a relative expression "+"DAYS, meaning DAYS days after a
        reference date.
    - M selects months: *, MONTH or FROM-TO.
    - Y selects years: *, YEAR or FROM-TO.
    - W selects week days: "w"WEEKDAY or "n"WEEKDAY,
      with 1=Monday, ..., 7=Sunday

    Examples:

    w1-5
        Monday through Friday
    15/1-6
        the 15th of the months January through June
    */1
        any day in January
    !w4
        the day MUST be a Thursday (staying within the current week)
    !n4
        the day MUST be the Thursday *following* the reference date
    """

    class Strategy(_DateSpecConstants):
        """Interface of the objects implementing DateSpec behaviour."""

        def matches(self, date):
            """Test whether date matches this date specification."""
            raise NotImplementedError

        def fix(self, date, now=None, refdate=None):
            """Adjust date, refdate being the reference date."""
            raise NotImplementedError

        def is_obsolete(self, now=None):
            """Test whether this date specification is obsolete, i.e.
            whether it refers to a date in the past.
            """
            raise NotImplementedError

    class ConstraintStrategy(Strategy):
        """A date constraint:

        "!wWEEKDAY" means the day MUST be the specified week day, staying
        within the current week.

        "!nWEEKDAY" means the day MUST be the specified week day, but
        always taking a future date, possibly moving into the following
        week to reach that day.
        """
        _op = None # op: 'w' or 'n'
        _ws = None # weekdays, as an inclusive (from, to) pair

        def __init__(self, mo):
            self._op = mo.group(self.CONS_OP)
            ws = mo.group(self.CONS_WD)
            if ws is not None: self._ws = self._range(ws)
            if self._ws is not None:
                self._check_range("WEEKDAYS", self._ws, 0, 7)

        def __str__(self):
            s = "!"
            if self._ws is not None:
                s += self._op
                s += self._tostr(self._ws)
            return s

        def matches(self, date):
            # a constraint never excludes a date by itself; it only
            # adjusts dates through fix()
            return True

        def fix(self, date, now=None, refdate=None):
            date = ensure_date(date)
            expected_wd = self._ws[0]
            actual_wd = date.isoweekday()
            if expected_wd != actual_wd:
                date += expected_wd - actual_wd
                # with 'n', never move backwards: jump to next week
                if self._op == 'n' and actual_wd > expected_wd:
                    date += 7
            return date

        def is_obsolete(self, now=None):
            # a constraint carries no absolute date: never obsolete
            return False

    class DateStrategy(Strategy):
        """A plain date specification."""
        _offset = None # offset (number of days after the reference date)
        _ws = None # weekdays
        _ds = None # days
        _ms = None # months
        _ys = None # years

        def __init__(self, mo):
            # offset
            o = mo.group(self.SPEC_OFFSET)
            if o is None: pass
            else: self._offset = self._range(o)[0]
            if self._offset is not None:
                self._check_value("OFFSET", self._offset, 1, None)
            # weekdays
            wf, wt = mo.group(self.SPEC_WD_FROM), mo.group(self.SPEC_WD_TO)
            if wf is None and wt is None: pass
            elif wt is not None: self._ws = self._range(wf, wt)
            else: self._ws = self._range(wf)
            if self._ws is not None:
                self._check_range("WEEKDAYS", self._ws, 0, 7)
            # days
            dw, df, dt = mo.group(self.SPEC_D_ANY), mo.group(self.SPEC_D_FROM), mo.group(self.SPEC_D_TO)
            if dw is None and df is None and dt is None: pass
            elif dw is not None: self._ds = '*'
            elif dt is not None: self._ds = self._range(df, dt)
            else: self._ds = self._range(df)
            # months
            mw, mf, mt = mo.group(self.SPEC_M_ANY), mo.group(self.SPEC_M_FROM), mo.group(self.SPEC_M_TO)
            if mw is None and mf is None and mt is None: self._ms = '*'
            elif mw is not None: self._ms = '*'
            elif mt is not None: self._ms = self._range(mf, mt)
            else: self._ms = self._range(mf)
            # years
            yw, yf, yt = mo.group(self.SPEC_Y_ANY), mo.group(self.SPEC_Y_FROM), mo.group(self.SPEC_Y_TO)
            if yw is None and yf is None and yt is None: self._ys = '*'
            elif yw is not None: self._ys = '*'
            elif yt is not None: self._ys = self._range(yf, yt)
            else: self._ys = self._range(yf)
            if self._isr(self._ys):
                # BUG FIX: keep _ys a tuple.  Bare map() returned a list
                # in Python 2 (an iterator in Python 3), which later
                # breaks _tostr(): "%i-%i" % vs requires a tuple.
                self._ys = tuple(map(_fix_year, self._ys))

        def __str__(self):
            s = ""
            if self._offset is not None:
                s += "+%i" % self._offset
            if self._ws is not None:
                s += "w"
                s += self._tostr(self._ws)
            elif self._ds is not None:
                s += self._tostr(self._ds)
            s += "/"
            s += self._tostr(self._ms)
            s += "/"
            s += self._tostr(self._ys)
            return s

        def fill_ranges(self, yrs=None, mrs=None, drs=None, wrs=None):
            """Append this spec's year/month/day/weekday ranges to the
            given accumulator lists (each created on demand) and return
            the updated (yrs, mrs, drs, wrs) tuple.
            """
            if yrs is None: yrs = []
            yrs.append(self._ys)
            if mrs is None: mrs = []
            mrs.append(self._ms)
            if self._ws is not None:
                if wrs is None: wrs = []
                wrs.append(self._ws)
            elif self._ds is not None:
                if drs is None: drs = []
                drs.append(self._ds)
            return yrs, mrs, drs, wrs

        def matches(self, date):
            date = ensure_date(date)
            # test the year
            if not self._matches(self._ys, date.year): return False
            # test the month
            if not self._matches(self._ms, date.month): return False
            # test either weekday or day
            if self._ws is not None:
                if not self._matches(self._ws, date.isoweekday()): return False
            elif self._ds is not None:
                if not self._matches(self._ds, date.day): return False
            return True

        def fix(self, date, now=None, refdate=None):
            # only "+DAYS" specs adjust the date: they move it DAYS days
            # after the reference date
            if self._offset is not None:
                if now is None: now = Date()
                if refdate is None: refdate = now
                date = refdate + self._offset
            return date

        def is_obsolete(self, now=None):
            # relative or wildcard specs never expire
            if self._offset is not None: return False
            elif self._ws is not None: return False
            elif self._isw(self._ds): return False
            elif self._isw(self._ms): return False
            elif self._isw(self._ys): return False
            if now is None: now = Date()
            y = now.year; ys = self._ys
            if y > ys[0] and y > ys[1]: return True
            elif y < ys[0] and y < ys[1]: return False
            m = now.month; ms = self._ms
            if m > ms[0] and m > ms[1]: return True
            elif m < ms[0] and m < ms[1]: return False
            d = now.day; ds = self._ds
            if d > ds[0] and d > ds[1]: return True
            return False

    _strategy = None
    strategy = property(lambda self: self._strategy)

    def is_constraint_spec(self):
        """Return True for a date-constraint specification ("!...")."""
        return isinstance(self._strategy, self.ConstraintStrategy)
    def is_date_spec(self):
        """Return True for a plain date specification."""
        return isinstance(self._strategy, self.DateStrategy)

    def __init__(self, spec):
        mo = self.RE_SPEC.match(spec)
        if mo is None:
            raise ValueError("Invalid DateSpec format: %s" % _s(spec))

        if mo.group(self.CONS_WD) is None: strategy = self.DateStrategy(mo)
        else: strategy = self.ConstraintStrategy(mo)
        self._strategy = strategy

    def __str__(self):
        return self._strategy.__str__()

    def __repr__(self):
        return "%s(\"%s\")" % (self.__class__.__name__, self)

    def matches(self, date):
        return self._strategy.matches(date)

    def fix(self, date, now=None, refdate=None):
        return self._strategy.fix(date, now, refdate)

    def matches_fix(self, date, now=None, refdate=None):
        """Return (matched, adjusted_date)."""
        if self.matches(date): return True, self.fix(date, now, refdate)
        else: return False, date

    def is_obsolete(self, now=None):
        # Backward-compatible fix: forward the optional reference date to
        # the strategy (it was silently dropped before).
        return self._strategy.is_obsolete(now)
+
class DateSpecs:
    """A sequence of date specifications, separated by commas.

    Warning! Order matters: computations and constraint evaluation are
    performed in specification order.
    """
    RE_COMMA = re.compile(r'\s*,\s*')

    _specs = None # list of DateSpec, set by __init__
    def __constraint_specs(self):
        # the constraint specifications only ("!w..." / "!n...")
        return [spec for spec in self._specs if spec.is_constraint_spec()]
    def __date_specs(self):
        # the plain date specifications only
        return [spec for spec in self._specs if spec.is_date_spec()]

    def __init__(self, specs):
        specs = _s(specs).strip()
        self._specs = [DateSpec(spec) for spec in self.RE_COMMA.split(specs)]

    def __str__(self):
        return ",".join([str(spec) for spec in self._specs])

    def __repr__(self):
        return "%s(\"%s\")" % (self.__class__.__name__, self)

    def matches(self, date):
        # True when date matches at least one specification
        for spec in self._specs:
            if spec.matches(date): return True
        return False

    def matches_fix(self, date, now=None, refdate=None):
        # If date matches one of the plain date specifications, let EVERY
        # specification (dates and constraints, in order) adjust it.
        # Returns (matched, date).
        if now is None: now = Date()
        if refdate is None: refdate = now
        for spec in self.__date_specs():
            if spec.matches(date):
                # the loop variable is deliberately reused here
                for spec in self._specs:
                    date = spec.fix(date, now, refdate)
                return True, date
        return False, date

    _now = None # cache key: the "now" used to build _candidates
    _refdate = None # cache key: the refdate used to build _candidates
    _candidates = None # cached candidate dates, see _get_candidates()

    def _reset_candidates(self):
        # drop the cache; the next _get_candidates() call rebuilds it
        self._now = None
        self._refdate = None
        self._candidates = None

    def _get_candidates(self, now=None, refdate=None):
        # Build (and cache) a {year: {month: {'ds': [days]}}} mapping of
        # candidate dates for get_next_date().
        if now is None: now = Date()
        if refdate is None: refdate = now
        if self._candidates is not None and \
           now == self._now and refdate == self._refdate:
            return self._candidates

        isw = DateSpec._isw
        # Enumerate the weekday, day, month and year ranges of all specs
        yrs = None
        mrs = None
        drs = None
        wrs = None
        for spec in self.__date_specs():
            yrs, mrs, drs, wrs = spec.strategy.fill_ranges(yrs, mrs, drs, wrs)
        # Compute the candidate dates
        # ...years
        candidates = {}
        if yrs is None: yrs = ['*']
        for ys in yrs:
            if ys == '*':
                # wildcard year: only this year and the next are candidates
                candidates[now.year] = {}
                candidates[now.year + 1] = {}
            else:
                for y in range(ys[0], ys[1] + 1):
                    candidates[y] = {}
        years = candidates.keys()
        # ...months
        for year in years:
            if mrs is None: mrs = ['*']
            for ms in mrs:
                if ms == '*':
                    # NOTE(review): now.month + 1 may be 13 here —
                    # presumably normalized downstream by Date; confirm
                    candidates[year][now.month] = {}
                    candidates[year][now.month + 1] = {}
                else:
                    for m in range(ms[0], ms[1] + 1):
                        candidates[year][m] = {}
        # ...weekdays or days
        for year in years:
            for month in candidates[year].keys():
                monthdays = range(1, _monthdays(year, month) + 1)
                #candidates[year][month]['ws'] = None
                candidates[year][month]['ds'] = None
                if wrs is not None:
                    # when week days are specified, include every day of
                    # the month (matching filters them out later)
                    #ws = []
                    #for wr in wrs:
                    #    ws.extend(range(wr[0], wr[1] + 1))
                    #candidates[year][month]['ws'] = ws
                    candidates[year][month]['ds'] = monthdays
                elif drs is not None:
                    ds = []
                    for dr in drs:
                        if isw(dr): ds.extend(monthdays)
                        else: ds.extend(range(dr[0], dr[1] + 1))
                    candidates[year][month]['ds'] = ds
                else:
                    # neither weekdays nor days: take every day of the
                    # month
                    candidates[year][month]['ds'] = monthdays
        # done: update the cache
        self._now = now
        self._refdate = refdate
        self._candidates = candidates
        return candidates

    def get_next_date(self, now=None, refdate=None):
        # Return the next matching date strictly after now, or None.
        if now is None: now = Date()
        if refdate is None: refdate = now
        candidates = self._get_candidates(now, refdate)
        for year in [year for year in sorted(candidates.keys())
                     if year >= now.year]:
            # NOTE(review): Date(0, month + 1, year) may receive day 0 and
            # month 13 — presumably Date normalizes the overflow to "last
            # day of `month`"; confirm against the Date class
            for month in [month for month in sorted(candidates[year].keys())
                          if Date(0, month + 1, year) >= now]:
                days = [day for day in candidates[year][month]['ds']
                        if Date(day, month, year) > now]
                #weekdays = candidates[year][month]['ws']
                for day in days:
                    next = Date(day, month, year)
                    matches, next = self.matches_fix(next, now, refdate)
                    if matches: return next
        return None

    def remove_obsoletes(self):
        # Drop the specifications that refer to past dates; return True
        # when at least one was removed (the candidate cache is reset).
        specs = [spec for spec in self._specs if not spec.is_obsolete()]
        if len(specs) != len(self._specs):
            self._specs = specs
            self._reset_candidates()
            return True
        else:
            return False
diff --git a/lib/nulib/python/nulib/editor.py b/lib/nulib/python/nulib/editor.py
new file mode 100644
index 0000000..a1e0ed2
--- /dev/null
+++ b/lib/nulib/python/nulib/editor.py
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Des fonctions pour éditer des fichiers.
+"""
+
+__all__ = ('edit_file', 'edit_template')
+
+import os, sys
+
+from .base import isseq
+from .env import get_editor, get_editor_options, get_editor_setrow
+from .uio import EditorIO, _s
+from .lines import Lines
+from .args import split_args
+from .tmpfiles import mktemp
+from .paths import in_PATH
+from .procs import spawn
+
# Per-editor capabilities: (options, setrow, setcol, colplus).
# `options` are extra command-line arguments, `setrow`/`setcol` are the
# prefixes used to build the goto-position argument (see edit_file), and
# `colplus` is added to the column number (presumably because emacs
# columns are 1-based — confirm).
EDITOR_CAPS = {'emacs': ('', '+', ':', 1),
               'xemacs': ('', '+', ':', 1),
               'gvim': ('-f', '+', '', 0),
               'vim': ('-f', '+', '', 0),
               'vi': ('', '+', '', 0),
               }
def get_default_editors():
    """Return the ordered tuple of fallback editors for the current
    platform.
    """
    editors = ('xemacs', 'emacs', 'gvim', 'vim', 'vi')
    if sys.platform.startswith('linux'):
        editors = ('emacs', 'xemacs', 'gvim', 'vim', 'vi')
    return editors
+
def get_editor_caps():
    """Return the configured editor and its capabilities.

    The editor comes from UTOOLS_EDITOR/EDITOR, falling back to the first
    default editor found in the PATH; raises OSError when none is found.
    Known editors get their capabilities from EDITOR_CAPS, otherwise from
    the UTOOLS_EDITOR_OPTIONS / UTOOLS_EDITOR_SETROW environment.

    @return: (editor, options, setrow, setcol, colplus)
    """
    options = None
    setrow = None
    setcol = ''
    colplus = 0

    editor = get_editor()
    if editor is None:
        for editor in get_default_editors():
            if in_PATH(editor): break
        else:
            raise OSError("Unable to find a default editor. Please set UTOOLS_EDITOR.")

    # BUG FIX (portability): dict.has_key() is deprecated and removed in
    # Python 3; the `in` operator is equivalent and works everywhere.
    if editor in EDITOR_CAPS:
        options, setrow, setcol, colplus = EDITOR_CAPS[editor]

    if options is None and setrow is None:
        options = split_args(get_editor_options())
        setrow = get_editor_setrow()
        if options is None and setrow is None and editor in EDITOR_CAPS:
            options, setrow, setcol, colplus = EDITOR_CAPS[editor]

    return editor, options, setrow or '', setcol or '', int(colplus)
+
def edit_file(file, row=None, col=None):
    """Launch an editor on the given file, optionally positioning the
    cursor at (row, col) when the editor supports it.

    @return: the editor's exit status.
    """
    editor, options, setrow, setcol, colplus = get_editor_caps()

    argv = [editor]
    if options:
        if isseq(options):
            argv.extend(options)
        else:
            argv.append(options)
    if row is not None and setrow:
        position = '%s%i' % (setrow, int(row))
        if col is not None and setcol:
            position = '%s%s%i' % (position, setcol, int(col) + colplus)
        argv.append(position)
    argv.append(file)
    return spawn(*argv)
+
def edit_template(template=None, strip_prefix=None, row=None, col=None, lines=None):
    """Obtain a value edited interactively in an editor.

    A temporary file is initialized with the content of template, then
    offered for editing.

    On the way out, every line starting with strip_prefix is removed, as
    well as leading and trailing blank lines, and a Lines instance
    holding the file's lines is returned.

    @return: lines
    @rtype: Lines
    """
    if lines is None:
        uio = EditorIO()
        lines = Lines(uio=uio)
    else:
        # reuse the caller's Lines object, ensuring it has a uio
        uio = lines.uio
        if uio is None:
            uio = EditorIO()
            lines.uio = uio

    ## prepare the file
    tmpf, tmpfile = mktemp('utools')
    try:
        if template is not None:
            template = uio.s(template)
            try: tmpf.write(template)
            finally: tmpf.close()
        else:
            tmpf.close()

        ## edit it
        edit_file(tmpfile, row, col)

        ## process the result
        lines.readlines(tmpfile)

        # strip prefixed lines
        if strip_prefix is not None:
            lines.filter(lambda l: not l.startswith(strip_prefix))

        # drop blank lines at the beginning and at the end
        while lines and not lines[0].strip(): del lines[0]
        while lines and not lines[-1].strip(): del lines[-1]

        return lines
    finally:
        os.remove(tmpfile)
diff --git a/lib/nulib/python/nulib/encdetect.py b/lib/nulib/python/nulib/encdetect.py
new file mode 100644
index 0000000..cb4a2cc
--- /dev/null
+++ b/lib/nulib/python/nulib/encdetect.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Fonctions pour détecter l'encoding d'une chaine ou d'un fichier, et/ou tester
+si c'est un fichier binaire.
+"""
+
+__all__ = ('UNRECOGNIZED_ENCODING', 'UNKNOWN_ENCODING',
+ 'guess_encoding', 'guess_string_encoding', 'guess_stream_encoding',
+ 'detect_line_encoding', 'guess_line_encoding',
+ 'FileType',
+ )
+
+from os import path
+import re
+
+from .base import isstr, make_prop
+from .encoding import LATIN1, UTF8, MACROMAN
+from .env import get_default_encoding
+
# The following tables contain the byte values, in each encoding, of the
# French accented characters:
# àâçèéêîïñôû

ISO_8859_1_CHARS = [
    0xe0, 0xe2, 0xe7, 0xe8, 0xe9, 0xea,
    0xee, 0xef, 0xf1, 0xf4, 0xfb,
]

MAC_ROMAN_CHARS = [
    0x88, 0x89, 0x8d, 0x8f, 0x8e, 0x90,
    0x94, 0x95, 0x96, 0x99, 0x9e,
]

# for UTF-8, the sequence is the lead byte 0xc3 followed by one of the
# bytes in this table
UTF_8_CHARS = [
    0xa0, 0xa2, 0xa7, 0xa8, 0xa9, 0xaa,
    0xae, 0xaf, 0xb1, 0xb4, 0xbb,
]
+
UNKNOWN_ENCODING = "Unknown"
UNRECOGNIZED_ENCODING = "Unrecognized"
def guess_string_encoding(ins, unknown=UNKNOWN_ENCODING, unrecognized=UNRECOGNIZED_ENCODING):
    """Guess the encoding of the string ins by scanning for known French
    accented characters.

    Returns UTF8, LATIN1 or MACROMAN when one is recognized,
    `unrecognized` when a non-ASCII byte cannot be classified, and
    `unknown` for pure-ASCII input (or the default encoding when unknown
    is None).
    """
    ascii = True
    i = 0
    max = len(ins)
    while i < max:
        b = ord(ins[i])
        if b >= 128: ascii = False
        if b == 0xc3:
            # UTF-8 lead byte: classify from the continuation byte.
            # BUG FIX: guard the lookahead — a 0xc3 byte at the very end
            # of the string used to raise IndexError; treat it as an
            # unrecognizable non-ASCII byte instead.
            if i + 1 >= max: return unrecognized
            b = ord(ins[i + 1])
            if b in UTF_8_CHARS: return UTF8
        elif b in ISO_8859_1_CHARS: return LATIN1
        elif b in MAC_ROMAN_CHARS: return MACROMAN
        elif not ascii: return unrecognized
        i = i + 1
    if unknown is None: return get_default_encoding()
    else: return unknown
+
def guess_stream_encoding(inf, unknown=UNKNOWN_ENCODING, unrecognized=UNRECOGNIZED_ENCODING):
    """Guess the encoding of a stream, or of the file at the given path.

    When inf is a string it is treated as a file name, opened in binary
    mode and closed afterwards; otherwise it must be an open stream.
    """
    opened_here = False
    if isstr(inf):
        inf = open(inf, 'rb')
        opened_here = True
    try:
        return guess_string_encoding(inf.read(), unknown, unrecognized)
    finally:
        if opened_here: inf.close()
+
def guess_encoding(ins=None, inf=None, unknown=None, unrecognized=UNRECOGNIZED_ENCODING):
    """Guess an encoding from a string (ins) or from a stream (inf);
    return unknown when neither is provided.
    """
    if ins is not None:
        return guess_string_encoding(ins, unknown, unrecognized)
    if inf is not None:
        return guess_stream_encoding(inf, unknown, unrecognized)
    return unknown
+
RE_ENCODING = re.compile(r'(?i)\b(?:en)?coding: (\S+)\b')
def detect_line_encoding(lines, examine_lines=10):
    """Look for an explicit "coding: XXX" (or "encoding: XXX")
    declaration in the first and last examine_lines lines of the
    sequence; return the declared encoding, or None.
    """
    nb_lines = len(lines)
    if nb_lines < 2 * examine_lines:
        examine_lines = nb_lines

    def scan(chunk):
        # return the first declared encoding in chunk, or None
        for line in chunk:
            mo = RE_ENCODING.search(line)
            if mo is not None: return mo.group(1)
        return None

    found = scan(lines[:examine_lines])
    if found is None and nb_lines > examine_lines:
        found = scan(lines[-examine_lines:])
    return found
+
# private sentinels, distinguishable from any real encoding name
_UNKNOWN = object()
_UNRECOGNIZED = object()
def guess_line_encoding(lines, unknown=None, unrecognized=UNRECOGNIZED_ENCODING):
    """Guess the encoding of a sequence of lines: the first line whose
    encoding can be determined decides for the whole sequence.
    """
    for line in lines:
        guessed = guess_string_encoding(line, _UNKNOWN, _UNRECOGNIZED)
        if guessed is _UNRECOGNIZED:
            return unrecognized
        if guessed is not _UNKNOWN:
            return guessed
    if unknown is None:
        return get_default_encoding()
    return unknown
+
class FileType(object):
    """Object used to determine the type of a file:
    - text or binary
    - encoding

    XXX finish this class, and integrate the functions from paths
    """
    # flags controlling which detection methods is_binary() may use
    _check_ext, check_ext = make_prop('_check_ext', True)[:2]
    _check_content, check_content = make_prop('_check_content', True)[:2]
    _file, file = make_prop('_file')[:2]

    def __init__(self, file):
        self._file = file

    def is_binary(self):
        """Lazily compute (and cache) whether the file is binary,
        deciding first from the extension, then from the content.
        """
        binary = self._binary
        if binary is None and self.check_ext:
            binary = self.is_binary_ext(self.file)
        if binary is None and self.check_content:
            # BUG FIX: was `self.check_context`, a property that does not
            # exist (AttributeError at runtime); the flag defined above is
            # named check_content.
            content = self.get_content(self.file)
            binary = self.is_binary_content(content)
        if binary is not None:
            self._binary = binary
        return binary
    _binary, binary = make_prop('_binary', getter=is_binary)[:2]

    def is_binary_ext(self, file):
        """Decide from the file name whether the file is binary."""
        _, filename = path.split(file)
        _, ext = path.splitext(filename)
        if filename == '.DS_Store': return True
        else: return ext.lower() in (
            # executables and object files
            '.bin', '.com', '.co_', '.exe', '.ex_', '.dll',
            '.pyc', '.pyd', '.pyo', '.class',
            # NOTE(review): '.so.*' can never equal a splitext() suffix —
            # presumably meant to match versioned shared objects; confirm
            '.o', '.so', '.so.*', '.lib', '.ovl',
            # archives
            '.gz', '.bz2', '.tar', '.tgz', '.tbz2',
            '.hqx', '.sit', '.zip', '.jar', '.rpm', '.srpm', '.deb',
            # multimedia
            '.bmp', '.gif', '.png', '.jpeg', '.jpg', '.tif', '.tiff',
            '.xbm', '.icns', '.ico', '.avi', '.mov', '.mpg', '.swf',
            '.mp3', '.snd', '.ogg', '.dat',
            # documents
            '.doc', '.ppt', '.xls', '.pdf',
            # miscellaneous
            '.bpt', '.bro', '.eps', '.fm', '.ins', '.mcp', '.objectplant',
            '.ofp', '.opn','.pqg', '.prj', '.ps', '.sl', '.strings', '.wordbreak',
            )

    def get_content(self, file):
        pass #XXX

    def is_binary_content(self, content):
        pass #XXX
diff --git a/lib/nulib/python/nulib/encoding.py b/lib/nulib/python/nulib/encoding.py
new file mode 100644
index 0000000..5896951
--- /dev/null
+++ b/lib/nulib/python/nulib/encoding.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Gestion de la langue et de l'encoding par défaut.
+"""
+
+__all__ = ('LATIN1', 'LATIN9', 'UTF8', 'MACROMAN',
+ 'normalize_encoding', 'get_encoding_or_default',
+ )
+
+import os, locale
+from locale import setlocale, LC_ALL, getlocale, getdefaultlocale
+
+# Gestion des formes normalisées des encodings
+# note: Ces formes sont déclarées normalisées par rapport à ulib, et non par
+# rapport à un quelconque organisme de normalisation.
+
LATIN1 = 'iso-8859-1'
LATIN9 = 'iso-8859-15'
UTF8 = 'utf-8'
MACROMAN = 'MacRoman'

# Map of lowercase, dash-normalized aliases to the canonical names above.
ENCODING_MAP = {}
for _enc, _aliases in (
        (LATIN1, ('latin-1', 'latin1', 'iso-8859-1', 'iso-88591',
                  'iso8859-1', 'iso88591')),
        (LATIN9, ('latin-9', 'latin9', 'iso-8859-15', 'iso-885915',
                  'iso8859-15', 'iso885915')),
        (UTF8, ('utf-8', 'utf8', 'utf')),
):
    for _alias in _aliases:
        ENCODING_MAP[_alias] = _enc
del _enc, _aliases, _alias

def normalize_encoding(encoding):
    """Return the canonical form of an encoding name, or the name
    unchanged when it has no known alias.  None is passed through.
    """
    if encoding is None:
        return None
    key = str(encoding).replace('_', '-').lower()
    return ENCODING_MAP.get(key, encoding)
+
DEFAULT_LANG = 'fr_FR.UTF-8'
LANG_MAP = {LATIN1: 'fr_FR',
            LATIN9: 'fr_FR@euro',
            UTF8: 'fr_FR.UTF-8',
            }

def get_lang_for_encoding(encoding):
    """Return the locale name matching the given encoding, falling back
    to DEFAULT_LANG for unknown encodings.
    """
    canonical = normalize_encoding(encoding)
    return LANG_MAP.get(canonical, DEFAULT_LANG)
+
def __set_locale_noexc(lang):
    """Try to activate lang as the process locale (via the LANG
    environment variable); return True on success, False otherwise.
    """
    os.environ['LANG'] = lang
    try:
        setlocale(LC_ALL, '')
    except locale.Error:
        return False
    return True
+
__locale_set = False # guard: the locale is configured only once
def __set_locale():
    # Select a usable locale for the process: prefer DEFAULT_LANG (when
    # LANG is unset or already UTF-8), then fall back to the latin
    # encodings and finally the C locale.
    global __locale_set
    if not __locale_set:
        lang = os.environ.get('LANG', '')
        if not lang or normalize_encoding(lang) == UTF8:
            os.environ['LANG'] = DEFAULT_LANG
        try:
            setlocale(LC_ALL, '')
        except locale.Error:
            print "WARNING: La valeur LANG='%s' n'est pas valide ou n'a pas été reconnue par le systeme." % os.environ['LANG']
            langs = (LATIN1, LATIN9, 'C')
            if os.environ['LANG'] != DEFAULT_LANG:
                print "WARNING: La valeur LANG='%s' sera utilise à la place si possible." % DEFAULT_LANG
                if __set_locale_noexc(DEFAULT_LANG):
                    langs = None
                else:
                    print "WARNING: La valeur LANG='%s' n'a pas pu etre selectionnee." % DEFAULT_LANG
            if langs is not None:
                for lang in langs:
                    if __set_locale_noexc(lang):
                        print "NOTE: la valeur LANG='%s' a ete selectionnee" % lang
                        break
                    else:
                        print "WARNING: La valeur LANG='%s' n'a pas pu etre utilisee." % lang

        __locale_set = True

# Locale initialization at import time; it can be disabled by providing
# an optional UTOOLS_CONFIG module with SET_LOCALE = False.
try: from UTOOLS_CONFIG import SET_LOCALE
except ImportError: SET_LOCALE = True
if SET_LOCALE: __set_locale()
+
def get_encoding_or_default(encoding=None, default_encoding=UTF8):
    """When encoding is None, determine the default encoding from
    getlocale(), then getdefaultlocale(), then default_encoding; the
    result is normalized.
    """
    if encoding is None:
        encoding = getlocale()[1]
    if encoding is None:
        encoding = getdefaultlocale()[1]
    if encoding is None:
        encoding = default_encoding
    return normalize_encoding(encoding)
diff --git a/lib/nulib/python/nulib/env.py b/lib/nulib/python/nulib/env.py
new file mode 100644
index 0000000..878dfbf
--- /dev/null
+++ b/lib/nulib/python/nulib/env.py
@@ -0,0 +1,53 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Accès aux paramètres configurables de ulib/utools dans l'environnement.
+"""
+
+__all__ = ('get_default_encoding', 'get_input_encoding', 'get_output_encoding',
+ 'get_editor', 'get_editor_options', 'get_editor_setrow', 'get_editor_encoding',
+ 'get_pager',
+ )
+
+from os import environ
+
+from .encoding import get_encoding_or_default, UTF8
+
+try: from nulib_config import DEFAULT_INPUT_ENCODING
+except ImportError: DEFAULT_INPUT_ENCODING = UTF8
+try: from nulib_config import DEFAULT_OUTPUT_ENCODING
+except ImportError: DEFAULT_OUTPUT_ENCODING = UTF8
+
def get_default_encoding(encoding=None, default_encoding=DEFAULT_OUTPUT_ENCODING):
    """When encoding is None, determine the default encoding from
    getlocale(), getdefaultlocale(), then default_encoding.

    Thin delegate to get_encoding_or_default() with this module's default.
    """
    return get_encoding_or_default(encoding, default_encoding)
+
def get_input_encoding():
    """Encoding for reading input: UTOOLS_INPUT_ENCODING, falling back to
    UTOOLS_OUTPUT_ENCODING, then to the locale-derived default.
    """
    encoding = environ.get('UTOOLS_INPUT_ENCODING',
                           environ.get('UTOOLS_OUTPUT_ENCODING', None))
    return get_default_encoding(encoding, DEFAULT_INPUT_ENCODING)
+
def get_output_encoding():
    """Encoding for writing output: UTOOLS_OUTPUT_ENCODING, or the
    locale-derived default.
    """
    return get_default_encoding(environ.get('UTOOLS_OUTPUT_ENCODING', None),
                                DEFAULT_OUTPUT_ENCODING)
+
def get_editor():
    """Editor command: UTOOLS_EDITOR, falling back to EDITOR."""
    return environ.get('UTOOLS_EDITOR', environ.get('EDITOR', None))

def get_editor_options():
    """Extra command-line options for the editor (UTOOLS_EDITOR_OPTIONS)."""
    return environ.get('UTOOLS_EDITOR_OPTIONS', None)

def get_editor_setrow():
    """Prefix used to build the editor's goto-row argument
    (UTOOLS_EDITOR_SETROW)."""
    return environ.get('UTOOLS_EDITOR_SETROW', None)

def get_editor_encoding():
    """Encoding for text exchanged with the editor: the first of
    UTOOLS_EDITOR_ENCODING, UTOOLS_INPUT_ENCODING and
    UTOOLS_OUTPUT_ENCODING that is set, else the locale-derived default.
    """
    for var in ('UTOOLS_EDITOR_ENCODING', 'UTOOLS_INPUT_ENCODING',
                'UTOOLS_OUTPUT_ENCODING'):
        encoding = environ.get(var, None)
        if encoding is not None: break
    return get_default_encoding(encoding, DEFAULT_INPUT_ENCODING)
+
def get_pager():
    """Pager command: UTOOLS_PAGER, falling back to PAGER."""
    return environ.get('UTOOLS_PAGER', environ.get('PAGER', None))

def get_pager_options():
    """Extra command-line options for the pager (UTOOLS_PAGER_OPTIONS)."""
    return environ.get('UTOOLS_PAGER_OPTIONS', None)
diff --git a/lib/nulib/python/nulib/ext/__init__.py b/lib/nulib/python/nulib/ext/__init__.py
new file mode 100644
index 0000000..9d853e8
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
diff --git a/lib/nulib/python/nulib/ext/flup/__init__.py b/lib/nulib/python/nulib/ext/flup/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/lib/nulib/python/nulib/ext/flup/client/__init__.py b/lib/nulib/python/nulib/ext/flup/client/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/client/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/lib/nulib/python/nulib/ext/flup/client/fcgi_app.py b/lib/nulib/python/nulib/ext/flup/client/fcgi_app.py
new file mode 100644
index 0000000..c1c15ec
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/client/fcgi_app.py
@@ -0,0 +1,461 @@
+# Copyright (c) 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import select
+import struct
+import socket
+import errno
+
+__all__ = ['FCGIApp']
+
+# Constants from the spec.
+FCGI_LISTENSOCK_FILENO = 0
+
+FCGI_HEADER_LEN = 8
+
+FCGI_VERSION_1 = 1
+
+FCGI_BEGIN_REQUEST = 1
+FCGI_ABORT_REQUEST = 2
+FCGI_END_REQUEST = 3
+FCGI_PARAMS = 4
+FCGI_STDIN = 5
+FCGI_STDOUT = 6
+FCGI_STDERR = 7
+FCGI_DATA = 8
+FCGI_GET_VALUES = 9
+FCGI_GET_VALUES_RESULT = 10
+FCGI_UNKNOWN_TYPE = 11
+FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
+
+FCGI_NULL_REQUEST_ID = 0
+
+FCGI_KEEP_CONN = 1
+
+FCGI_RESPONDER = 1
+FCGI_AUTHORIZER = 2
+FCGI_FILTER = 3
+
+FCGI_REQUEST_COMPLETE = 0
+FCGI_CANT_MPX_CONN = 1
+FCGI_OVERLOADED = 2
+FCGI_UNKNOWN_ROLE = 3
+
+FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
+FCGI_MAX_REQS = 'FCGI_MAX_REQS'
+FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
+
+FCGI_Header = '!BBHHBx'
+FCGI_BeginRequestBody = '!HB5x'
+FCGI_EndRequestBody = '!LB3x'
+FCGI_UnknownTypeBody = '!B7x'
+
+FCGI_BeginRequestBody_LEN = struct.calcsize(FCGI_BeginRequestBody)
+FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
+FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
+
+if __debug__:
+    import time
+
+    # Set non-zero to write debug output to a file.
+    DEBUG = 0
+    DEBUGLOG = '/tmp/fcgi_app.log'
+
+    def _debug(level, msg):
+        # Only emit messages whose level is within the configured verbosity.
+        if DEBUG < level:
+            return
+
+        try:
+            f = open(DEBUGLOG, 'a')
+            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
+            f.close()
+        except:
+            # Logging is best-effort; never let it break request handling.
+            pass
+
+def decode_pair(s, pos=0):
+    """
+    Decodes a name/value pair.
+
+    The number of bytes decoded as well as the name/value pair
+    are returned.
+    """
+    # FastCGI lengths are 1 byte when < 128; otherwise 4 bytes big-endian
+    # with the high bit set (masked off with 0x7fffffff).
+    nameLength = ord(s[pos])
+    if nameLength & 128:
+        nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
+        pos += 4
+    else:
+        pos += 1
+
+    valueLength = ord(s[pos])
+    if valueLength & 128:
+        valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
+        pos += 4
+    else:
+        pos += 1
+
+    # The name bytes immediately follow the two lengths, then the value.
+    name = s[pos:pos+nameLength]
+    pos += nameLength
+    value = s[pos:pos+valueLength]
+    pos += valueLength
+
+    return (pos, (name, value))
+
+def encode_pair(name, value):
+    """
+    Encodes a name/value pair.
+
+    The encoded string is returned.
+    """
+    # Mirror image of decode_pair: lengths < 128 take 1 byte, longer ones
+    # take 4 bytes with the high bit set.
+    nameLength = len(name)
+    if nameLength < 128:
+        s = chr(nameLength)
+    else:
+        s = struct.pack('!L', nameLength | 0x80000000L)
+
+    valueLength = len(value)
+    if valueLength < 128:
+        s += chr(valueLength)
+    else:
+        s += struct.pack('!L', valueLength | 0x80000000L)
+
+    return s + name + value
+
+class Record(object):
+    """
+    A FastCGI Record.
+
+    Used for encoding/decoding records.
+    """
+    def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
+        self.version = FCGI_VERSION_1
+        self.type = type
+        self.requestId = requestId
+        self.contentLength = 0
+        self.paddingLength = 0
+        self.contentData = ''
+
+    def _recvall(sock, length):
+        """
+        Attempts to receive length bytes from a socket, blocking if necessary.
+        (Socket may be blocking or non-blocking.)
+
+        Returns (data, bytes_received); bytes_received may be short on EOF.
+        """
+        dataList = []
+        recvLen = 0
+        while length:
+            try:
+                data = sock.recv(length)
+            except socket.error, e:
+                if e[0] == errno.EAGAIN:
+                    # Non-blocking socket not ready; wait for readability.
+                    select.select([sock], [], [])
+                    continue
+                else:
+                    raise
+            if not data: # EOF
+                break
+            dataList.append(data)
+            dataLen = len(data)
+            recvLen += dataLen
+            length -= dataLen
+        return ''.join(dataList), recvLen
+    _recvall = staticmethod(_recvall)
+
+    def read(self, sock):
+        """Read and decode a Record from a socket.
+
+        Raises EOFError on any socket error or short read.
+        """
+        try:
+            header, length = self._recvall(sock, FCGI_HEADER_LEN)
+        except:
+            raise EOFError
+
+        if length < FCGI_HEADER_LEN:
+            raise EOFError
+
+        self.version, self.type, self.requestId, self.contentLength, \
+                      self.paddingLength = struct.unpack(FCGI_Header, header)
+
+        if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
+                             'contentLength = %d' %
+                             (sock.fileno(), self.type, self.requestId,
+                              self.contentLength))
+
+        if self.contentLength:
+            try:
+                self.contentData, length = self._recvall(sock,
+                                                         self.contentLength)
+            except:
+                raise EOFError
+
+            if length < self.contentLength:
+                raise EOFError
+
+        if self.paddingLength:
+            # Padding bytes are read and discarded.
+            try:
+                self._recvall(sock, self.paddingLength)
+            except:
+                raise EOFError
+
+    def _sendall(sock, data):
+        """
+        Writes data to a socket and does not return until all the data is sent.
+        """
+        length = len(data)
+        while length:
+            try:
+                sent = sock.send(data)
+            except socket.error, e:
+                if e[0] == errno.EAGAIN:
+                    # Non-blocking socket full; wait for writability.
+                    select.select([], [sock], [])
+                    continue
+                else:
+                    raise
+            data = data[sent:]
+            length -= sent
+    _sendall = staticmethod(_sendall)
+
+    def write(self, sock):
+        """Encode and write a Record to a socket."""
+        # Pad the content so the record ends on an 8-byte boundary.
+        self.paddingLength = -self.contentLength & 7
+
+        if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
+                             'contentLength = %d' %
+                             (sock.fileno(), self.type, self.requestId,
+                              self.contentLength))
+
+        header = struct.pack(FCGI_Header, self.version, self.type,
+                             self.requestId, self.contentLength,
+                             self.paddingLength)
+        self._sendall(sock, header)
+        if self.contentLength:
+            self._sendall(sock, self.contentData)
+        if self.paddingLength:
+            self._sendall(sock, '\x00'*self.paddingLength)
+
+class FCGIApp(object):
+    """
+    Client-side FastCGI gateway: a WSGI application that forwards each
+    request to a FastCGI server over a fresh socket and relays the
+    response back to the WSGI caller.
+    """
+    def __init__(self, command=None, connect=None, host=None, port=None,
+                 filterEnviron=True):
+        # Either a command to launch (not implemented, see _getConnection)
+        # or an address to connect to must be given -- but not both.
+        if host is not None:
+            assert port is not None
+            connect=(host, port)
+
+        assert (command is not None and connect is None) or \
+               (command is None and connect is not None)
+
+        self._command = command
+        self._connect = connect
+
+        self._filterEnviron = filterEnviron
+
+        #sock = self._getConnection()
+        #print self._fcgiGetValues(sock, ['FCGI_MAX_CONNS', 'FCGI_MAX_REQS', 'FCGI_MPXS_CONNS'])
+        #sock.close()
+
+    def __call__(self, environ, start_response):
+        # For sanity's sake, we don't care about FCGI_MPXS_CONN
+        # (connection multiplexing). For every request, we obtain a new
+        # transport socket, perform the request, then discard the socket.
+        # This is, I believe, how mod_fastcgi does things...
+
+        sock = self._getConnection()
+
+        # Since this is going to be the only request on this connection,
+        # set the request ID to 1.
+        requestId = 1
+
+        # Begin the request
+        rec = Record(FCGI_BEGIN_REQUEST, requestId)
+        rec.contentData = struct.pack(FCGI_BeginRequestBody, FCGI_RESPONDER, 0)
+        rec.contentLength = FCGI_BeginRequestBody_LEN
+        rec.write(sock)
+
+        # Filter WSGI environ and send it as FCGI_PARAMS
+        if self._filterEnviron:
+            params = self._defaultFilterEnviron(environ)
+        else:
+            params = self._lightFilterEnviron(environ)
+        # TODO: Anything not from environ that needs to be sent also?
+        self._fcgiParams(sock, requestId, params)
+        # An empty FCGI_PARAMS record terminates the params stream.
+        self._fcgiParams(sock, requestId, {})
+
+        # Transfer wsgi.input to FCGI_STDIN, at most 4096 bytes per record.
+        content_length = int(environ.get('CONTENT_LENGTH') or 0)
+        while True:
+            chunk_size = min(content_length, 4096)
+            s = environ['wsgi.input'].read(chunk_size)
+            content_length -= len(s)
+            rec = Record(FCGI_STDIN, requestId)
+            rec.contentData = s
+            rec.contentLength = len(s)
+            rec.write(sock)
+
+            # The final empty record (s == '') terminates FCGI_STDIN.
+            if not s: break
+
+        # Empty FCGI_DATA stream
+        rec = Record(FCGI_DATA, requestId)
+        rec.write(sock)
+
+        # Main loop. Process FCGI_STDOUT, FCGI_STDERR, FCGI_END_REQUEST
+        # records from the application.
+        result = []
+        while True:
+            inrec = Record()
+            inrec.read(sock)
+            if inrec.type == FCGI_STDOUT:
+                if inrec.contentData:
+                    result.append(inrec.contentData)
+                else:
+                    # TODO: Should probably be pedantic and no longer
+                    # accept FCGI_STDOUT records?
+                    pass
+            elif inrec.type == FCGI_STDERR:
+                # Simply forward to wsgi.errors
+                environ['wsgi.errors'].write(inrec.contentData)
+            elif inrec.type == FCGI_END_REQUEST:
+                # TODO: Process appStatus/protocolStatus fields?
+                break
+
+        # Done with this transport socket, close it. (FCGI_KEEP_CONN was not
+        # set in the FCGI_BEGIN_REQUEST record we sent above. So the
+        # application is expected to do the same.)
+        sock.close()
+
+        result = ''.join(result)
+
+        # Parse response headers from FCGI_STDOUT
+        status = '200 OK'
+        headers = []
+        pos = 0
+        while True:
+            eolpos = result.find('\n', pos)
+            if eolpos < 0: break
+            line = result[pos:eolpos-1]
+            pos = eolpos + 1
+
+            # strip in case of CR. NB: This will also strip other
+            # whitespace...
+            line = line.strip()
+
+            # Empty line signifies end of headers
+            if not line: break
+
+            # TODO: Better error handling
+            header, value = line.split(':', 1)
+            header = header.strip().lower()
+            value = value.strip()
+
+            if header == 'status':
+                # Special handling of Status header
+                status = value
+                if status.find(' ') < 0:
+                    # Append a dummy reason phrase if one was not provided
+                    status += ' FCGIApp'
+            else:
+                headers.append((header, value))
+
+        # What remains after the header section is the response body.
+        result = result[pos:]
+
+        # Set WSGI status, headers, and return result.
+        start_response(status, headers)
+        return [result]
+
+    def _getConnection(self):
+        """Open and return a new transport socket to the FastCGI server."""
+        if self._connect is not None:
+            # The simple case. Create a socket and connect to the
+            # application. A string address is taken to be a UNIX socket
+            # path; anything else is treated as a (host, port) TCP address.
+            if type(self._connect) is str:
+                sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            else:
+                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+            sock.connect(self._connect)
+            return sock
+
+        # To be done when I have more time...
+        raise NotImplementedError, 'Launching and managing FastCGI programs not yet implemented'
+
+    def _fcgiGetValues(self, sock, vars):
+        """Query the server's management values via FCGI_GET_VALUES."""
+        # Construct FCGI_GET_VALUES record
+        outrec = Record(FCGI_GET_VALUES)
+        data = []
+        for name in vars:
+            data.append(encode_pair(name, ''))
+        data = ''.join(data)
+        outrec.contentData = data
+        outrec.contentLength = len(data)
+        outrec.write(sock)
+
+        # Await response
+        inrec = Record()
+        inrec.read(sock)
+        result = {}
+        if inrec.type == FCGI_GET_VALUES_RESULT:
+            pos = 0
+            while pos < inrec.contentLength:
+                pos, (name, value) = decode_pair(inrec.contentData, pos)
+                result[name] = value
+        return result
+
+    def _fcgiParams(self, sock, requestId, params):
+        """Send a dict of name/value pairs as one FCGI_PARAMS record."""
+        rec = Record(FCGI_PARAMS, requestId)
+        data = []
+        for name,value in params.items():
+            data.append(encode_pair(name, value))
+        data = ''.join(data)
+        rec.contentData = data
+        rec.contentLength = len(data)
+        rec.write(sock)
+
+    # Only environ keys matching these prefixes/names survive the default
+    # filter (i.e. the CGI-style variables).
+    _environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
+                        'CONTENT_']
+    _environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
+    _environRenames = {}
+
+    def _defaultFilterEnviron(self, environ):
+        """Keep only the CGI-style variables from the WSGI environ."""
+        result = {}
+        for n in environ.keys():
+            for p in self._environPrefixes:
+                if n.startswith(p):
+                    result[n] = environ[n]
+            if n in self._environCopies:
+                result[n] = environ[n]
+            if n in self._environRenames:
+                result[self._environRenames[n]] = environ[n]
+
+        return result
+
+    def _lightFilterEnviron(self, environ):
+        """Keep every all-uppercase environ key (drops e.g. wsgi.* keys)."""
+        result = {}
+        for n in environ.keys():
+            if n.upper() == n:
+                result[n] = environ[n]
+        return result
+
+if __name__ == '__main__':
+    # Smoke test: expose the FastCGI client gateway as an AJP-fronted
+    # WSGI application connecting to localhost:4242.
+    from flup.server.ajp import WSGIServer
+    app = FCGIApp(connect=('localhost', 4242))
+    #import paste.lint
+    #app = paste.lint.middleware(app)
+    WSGIServer(app).run()
diff --git a/lib/nulib/python/nulib/ext/flup/client/scgi_app.py b/lib/nulib/python/nulib/ext/flup/client/scgi_app.py
new file mode 100644
index 0000000..c26cd58
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/client/scgi_app.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import select
+import struct
+import socket
+import errno
+
+__all__ = ['SCGIApp']
+
+def encodeNetstring(s):
+    # Netstring framing used by SCGI requests: "<len>:<bytes>,".
+    return ''.join([str(len(s)), ':', s, ','])
+
+class SCGIApp(object):
+    """
+    Client-side SCGI gateway: a WSGI application that forwards each
+    request to an SCGI server over a fresh socket and relays the
+    response back to the WSGI caller.
+    """
+    def __init__(self, connect=None, host=None, port=None,
+                 filterEnviron=True):
+        # connect may be a UNIX socket path (str) or a (host, port) tuple;
+        # host/port are a convenience for the tuple form.
+        if host is not None:
+            assert port is not None
+            connect=(host, port)
+
+        assert connect is not None
+        self._connect = connect
+
+        self._filterEnviron = filterEnviron
+
+    def __call__(self, environ, start_response):
+        sock = self._getConnection()
+
+        outfile = sock.makefile('w')
+        infile = sock.makefile('r')
+
+        # NOTE(review): relies on Python 2 socket.makefile() giving the file
+        # objects their own dup of the socket, so the original handle can be
+        # closed here already -- confirm before porting.
+        sock.close()
+
+        # Filter WSGI environ and send as request headers
+        if self._filterEnviron:
+            headers = self._defaultFilterEnviron(environ)
+        else:
+            headers = self._lightFilterEnviron(environ)
+        # TODO: Anything not from environ that needs to be sent also?
+
+        # CONTENT_LENGTH is re-added explicitly at the front of the header
+        # list below, so remove any copy the filter kept.
+        content_length = int(environ.get('CONTENT_LENGTH') or 0)
+        if headers.has_key('CONTENT_LENGTH'):
+            del headers['CONTENT_LENGTH']
+
+        # Headers are NUL-separated name/value pairs wrapped in a netstring.
+        headers_out = ['CONTENT_LENGTH', str(content_length), 'SCGI', '1']
+        for k,v in headers.items():
+            headers_out.append(k)
+            headers_out.append(v)
+        headers_out.append('') # For trailing NUL
+        outfile.write(encodeNetstring('\x00'.join(headers_out)))
+
+        # Transfer wsgi.input to outfile, at most 4096 bytes at a time.
+        while True:
+            chunk_size = min(content_length, 4096)
+            s = environ['wsgi.input'].read(chunk_size)
+            content_length -= len(s)
+            outfile.write(s)
+
+            if not s: break
+
+        outfile.close()
+
+        # Read result from SCGI server
+        result = []
+        while True:
+            buf = infile.read(4096)
+            if not buf: break
+
+            result.append(buf)
+
+        infile.close()
+
+        result = ''.join(result)
+
+        # Parse response headers
+        status = '200 OK'
+        headers = []
+        pos = 0
+        while True:
+            eolpos = result.find('\n', pos)
+            if eolpos < 0: break
+            line = result[pos:eolpos-1]
+            pos = eolpos + 1
+
+            # strip in case of CR. NB: This will also strip other
+            # whitespace...
+            line = line.strip()
+
+            # Empty line signifies end of headers
+            if not line: break
+
+            # TODO: Better error handling
+            header, value = line.split(':', 1)
+            header = header.strip().lower()
+            value = value.strip()
+
+            if header == 'status':
+                # Special handling of Status header
+                status = value
+                if status.find(' ') < 0:
+                    # Append a dummy reason phrase if one was not provided
+                    status += ' SCGIApp'
+            else:
+                headers.append((header, value))
+
+        # What remains after the header section is the response body.
+        result = result[pos:]
+
+        # Set WSGI status, headers, and return result.
+        start_response(status, headers)
+        return [result]
+
+    def _getConnection(self):
+        """Open and return a new transport socket to the SCGI server."""
+        # A string address is taken to be a UNIX socket path; anything else
+        # is treated as a (host, port) TCP address.
+        if type(self._connect) is str:
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        else:
+            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        sock.connect(self._connect)
+        return sock
+
+    # Only environ keys matching these prefixes/names survive the default
+    # filter (i.e. the CGI-style variables).
+    _environPrefixes = ['SERVER_', 'HTTP_', 'REQUEST_', 'REMOTE_', 'PATH_',
+                        'CONTENT_']
+    _environCopies = ['SCRIPT_NAME', 'QUERY_STRING', 'AUTH_TYPE']
+    _environRenames = {}
+
+    def _defaultFilterEnviron(self, environ):
+        """Keep only the CGI-style variables from the WSGI environ."""
+        result = {}
+        for n in environ.keys():
+            for p in self._environPrefixes:
+                if n.startswith(p):
+                    result[n] = environ[n]
+            if n in self._environCopies:
+                result[n] = environ[n]
+            if n in self._environRenames:
+                result[self._environRenames[n]] = environ[n]
+
+        return result
+
+    def _lightFilterEnviron(self, environ):
+        """Keep every all-uppercase environ key (drops e.g. wsgi.* keys)."""
+        result = {}
+        for n in environ.keys():
+            if n.upper() == n:
+                result[n] = environ[n]
+        return result
+
+if __name__ == '__main__':
+    # Smoke test: expose the SCGI client gateway as an AJP-fronted
+    # WSGI application connecting to localhost:4000.
+    from flup.server.ajp import WSGIServer
+    app = SCGIApp(connect=('localhost', 4000))
+    #import paste.lint
+    #app = paste.lint.middleware(app)
+    WSGIServer(app).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/__init__.py b/lib/nulib/python/nulib/ext/flup/server/__init__.py
new file mode 100644
index 0000000..792d600
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/__init__.py
@@ -0,0 +1 @@
+#
diff --git a/lib/nulib/python/nulib/ext/flup/server/ajp.py b/lib/nulib/python/nulib/ext/flup/server/ajp.py
new file mode 100644
index 0000000..3dca295
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/ajp.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+ajp - an AJP 1.3/WSGI gateway.
+
+For more information about AJP and AJP connectors for your web server, see
+<http://jakarta.apache.org/tomcat/connectors-doc/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/dev/peps/pep-0333/>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ import sys
+ from myapplication import app # Assume app is your WSGI application object
+ from ajp import WSGIServer
+ ret = WSGIServer(app).run()
+ sys.exit(ret and 42 or 0)
+
+See the documentation for WSGIServer for more information.
+
+About the bit of logic at the end:
+Upon receiving SIGHUP, the python script will exit with status code 42. This
+can be used by a wrapper script to determine if the python script should be
+re-run. When a SIGINT or SIGTERM is received, the script exits with status
+code 0, possibly indicating a normal exit.
+
+Example wrapper script:
+
+ #!/bin/sh
+ STATUS=42
+ while test $STATUS -eq 42; do
+ python "$@" that_script_above.py
+ STATUS=$?
+ done
+
+Example workers.properties (for mod_jk):
+
+ worker.list=foo
+ worker.foo.port=8009
+ worker.foo.host=localhost
+ worker.foo.type=ajp13
+
+Example httpd.conf (for mod_jk):
+
+ JkWorkersFile /path/to/workers.properties
+ JkMount /* foo
+
+Note that if you mount your ajp application anywhere but the root ("/"), you
+SHOULD specify scriptName to the WSGIServer constructor. This will ensure
+that SCRIPT_NAME/PATH_INFO are correctly deduced.
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import socket
+import logging
+
+from flup.server.ajp_base import BaseAJPServer, Connection
+from flup.server.threadedserver import ThreadedServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseAJPServer, ThreadedServer):
+    """
+    AJP1.3/WSGI server. Runs your WSGI application as a persistent program
+    that understands AJP1.3. Opens up a TCP socket, binds it, and then
+    waits for forwarded requests from your webserver.
+
+    Why AJP? Two good reasons are that AJP provides load-balancing and
+    fail-over support. Personally, I just wanted something new to
+    implement. :)
+
+    Of course you will need an AJP1.3 connector for your webserver (e.g.
+    mod_jk) - see <http://jakarta.apache.org/tomcat/connectors-doc/>.
+    """
+    def __init__(self, application, scriptName='', environ=None,
+                 multithreaded=True, multiprocess=False,
+                 bindAddress=('localhost', 8009), allowedServers=None,
+                 loggingLevel=logging.INFO, debug=True, **kw):
+        """
+        scriptName is the initial portion of the URL path that "belongs"
+        to your application. It is used to determine PATH_INFO (which doesn't
+        seem to be passed in). An empty scriptName means your application
+        is mounted at the root of your virtual host.
+
+        environ, which must be a dictionary, can contain any additional
+        environment variables you want to pass to your application.
+
+        bindAddress is the address to bind to, which must be a tuple of
+        length 2. The first element is a string, which is the host name
+        or IPv4 address of a local interface. The 2nd element is the port
+        number.
+
+        allowedServers must be None or a list of strings representing the
+        IPv4 addresses of servers allowed to connect. None means accept
+        connections from anywhere.
+
+        loggingLevel sets the logging level of the module-level logger.
+        """
+        BaseAJPServer.__init__(self, application,
+                               scriptName=scriptName,
+                               environ=environ,
+                               multithreaded=multithreaded,
+                               multiprocess=multiprocess,
+                               bindAddress=bindAddress,
+                               allowedServers=allowedServers,
+                               loggingLevel=loggingLevel,
+                               debug=debug)
+        # jobClass/jobArgs are fixed below; drop any caller-supplied values
+        # so they are not passed twice to ThreadedServer.__init__.
+        for key in ('jobClass', 'jobArgs'):
+            if kw.has_key(key):
+                del kw[key]
+        ThreadedServer.__init__(self, jobClass=Connection, jobArgs=(self,),
+                                **kw)
+
+    def run(self):
+        """
+        Main loop. Call this after instantiating WSGIServer. SIGHUP, SIGINT,
+        SIGQUIT, SIGTERM cause it to cleanup and return. (If a SIGHUP
+        is caught, this method returns True. Returns False otherwise.)
+        """
+        self.logger.info('%s starting up', self.__class__.__name__)
+
+        try:
+            sock = self._setupSocket()
+        except socket.error, e:
+            self.logger.error('Failed to bind socket (%s), exiting', e[1])
+            return False
+
+        ret = ThreadedServer.run(self, sock)
+
+        self._cleanupSocket(sock)
+
+        self.logger.info('%s shutting down%s', self.__class__.__name__,
+                         self._hupReceived and ' (reload requested)' or '')
+
+        return ret
+
+if __name__ == '__main__':
+    def test_app(environ, start_response):
+        """Probably not the most efficient example."""
+        import cgi
+        start_response('200 OK', [('Content-Type', 'text/html')])
+        yield '<html><head><title>Hello World!</title></head>\n' \
+              '<body>\n' \
+              '<p>Hello World!</p>\n' \
+              '<table border="1">'
+        names = environ.keys()
+        names.sort()
+        for name in names:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                name, cgi.escape(`environ[name]`))
+
+        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+                                keep_blank_values=1)
+        if form.list:
+            yield '<tr><th colspan="2">Form data</th></tr>'
+
+        for field in form.list:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                field.name, field.value)
+
+        yield '</table>\n' \
+              '</body></html>\n'
+
+    from wsgiref import validate
+    test_app = validate.validator(test_app)
+    # Explicitly set bindAddress to *:8009 for testing.
+    WSGIServer(test_app,
+               bindAddress=('', 8009), allowedServers=None,
+               loggingLevel=logging.DEBUG).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/ajp_base.py b/lib/nulib/python/nulib/ext/flup/server/ajp_base.py
new file mode 100644
index 0000000..2acff01
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/ajp_base.py
@@ -0,0 +1,956 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import sys
+import socket
+import select
+import struct
+import signal
+import logging
+import errno
+import datetime
+import time
+
+# Unfortunately, for now, threads are required.
+import thread
+import threading
+
+__all__ = ['BaseAJPServer']
+
+class NoDefault(object):
+    # Unique marker class; presumably used as a "no value supplied"
+    # sentinel default -- confirm at call sites.
+    pass
+
+# Packet header prefixes.
+SERVER_PREFIX = '\x12\x34'
+CONTAINER_PREFIX = 'AB'
+
+# Server packet types.
+PKTTYPE_FWD_REQ = '\x02'
+PKTTYPE_SHUTDOWN = '\x07'
+PKTTYPE_PING = '\x08'
+PKTTYPE_CPING = '\x0a'
+
+# Container packet types.
+PKTTYPE_SEND_BODY = '\x03'
+PKTTYPE_SEND_HEADERS = '\x04'
+PKTTYPE_END_RESPONSE = '\x05'
+PKTTYPE_GET_BODY = '\x06'
+PKTTYPE_CPONG = '\x09'
+
+# Code tables for methods/headers/attributes.
+methodTable = [
+ None,
+ 'OPTIONS',
+ 'GET',
+ 'HEAD',
+ 'POST',
+ 'PUT',
+ 'DELETE',
+ 'TRACE',
+ 'PROPFIND',
+ 'PROPPATCH',
+ 'MKCOL',
+ 'COPY',
+ 'MOVE',
+ 'LOCK',
+ 'UNLOCK',
+ 'ACL',
+ 'REPORT',
+ 'VERSION-CONTROL',
+ 'CHECKIN',
+ 'CHECKOUT',
+ 'UNCHECKOUT',
+ 'SEARCH',
+ 'MKWORKSPACE',
+ 'UPDATE',
+ 'LABEL',
+ 'MERGE',
+ 'BASELINE_CONTROL',
+ 'MKACTIVITY'
+ ]
+
+requestHeaderTable = [
+ None,
+ 'Accept',
+ 'Accept-Charset',
+ 'Accept-Encoding',
+ 'Accept-Language',
+ 'Authorization',
+ 'Connection',
+ 'Content-Type',
+ 'Content-Length',
+ 'Cookie',
+ 'Cookie2',
+ 'Host',
+ 'Pragma',
+ 'Referer',
+ 'User-Agent'
+ ]
+
+attributeTable = [
+ None,
+ 'CONTEXT',
+ 'SERVLET_PATH',
+ 'REMOTE_USER',
+ 'AUTH_TYPE',
+ 'QUERY_STRING',
+ 'JVM_ROUTE',
+ 'SSL_CERT',
+ 'SSL_CIPHER',
+ 'SSL_SESSION',
+ None, # name follows
+ 'SSL_KEY_SIZE'
+ ]
+
+responseHeaderTable = [
+ None,
+ 'content-type',
+ 'content-language',
+ 'content-length',
+ 'date',
+ 'last-modified',
+ 'location',
+ 'set-cookie',
+ 'set-cookie2',
+ 'servlet-engine',
+ 'status',
+ 'www-authenticate'
+ ]
+
+# The main classes use this name for logging.
+LoggerName = 'ajp-wsgi'
+
+# Set up module-level logger.
+console = logging.StreamHandler()
+console.setLevel(logging.DEBUG)
+console.setFormatter(logging.Formatter('%(asctime)s : %(message)s',
+ '%Y-%m-%d %H:%M:%S'))
+logging.getLogger(LoggerName).addHandler(console)
+del console
+
+class ProtocolError(Exception):
+ """
+ Exception raised when the server does something unexpected or
+ sends garbled data. Usually leads to a Connection closing.
+ """
+ pass
+
+def decodeString(data, pos=0):
+    """Decode a string.
+
+    AJP strings are a 2-byte big-endian length, the bytes themselves,
+    then a terminating NUL. Returns (string, new_pos).
+    """
+    try:
+        length = struct.unpack('>H', data[pos:pos+2])[0]
+        pos += 2
+        if length == 0xffff: # This was undocumented!
+            # 0xffff denotes an empty/absent string: no body, no NUL.
+            return '', pos
+        s = data[pos:pos+length]
+        return s, pos+length+1 # Don't forget NUL
+    except Exception, e:
+        # Any slicing/unpack failure means garbled input from the server.
+        raise ProtocolError, 'decodeString: '+str(e)
+
+def decodeRequestHeader(data, pos=0):
+    """Decode a request header/value pair.
+
+    Returns (name, value, new_pos).
+    """
+    try:
+        if data[pos] == '\xa0':
+            # Use table: '\xa0' marks a well-known header sent as a 1-byte
+            # index into requestHeaderTable.
+            i = ord(data[pos+1])
+            name = requestHeaderTable[i]
+            if name is None:
+                raise ValueError, 'bad request header code'
+            pos += 2
+        else:
+            # Otherwise the header name is sent literally as a string.
+            name, pos = decodeString(data, pos)
+        value, pos = decodeString(data, pos)
+        return name, value, pos
+    except Exception, e:
+        raise ProtocolError, 'decodeRequestHeader: '+str(e)
+
+def decodeAttribute(data, pos=0):
+    """Decode a request attribute.
+
+    Returns (name, value, new_pos); (None, None, new_pos) marks the end
+    of the attribute list.
+    """
+    try:
+        i = ord(data[pos])
+        pos += 1
+        if i == 0xff:
+            # end
+            return None, None, pos
+        elif i == 0x0a:
+            # name follows
+            name, pos = decodeString(data, pos)
+        elif i == 0x0b:
+            # Special handling of SSL_KEY_SIZE.
+            name = attributeTable[i]
+            # Value is an int, not a string.
+            value = struct.unpack('>H', data[pos:pos+2])[0]
+            return name, str(value), pos+2
+        else:
+            # All other codes index into attributeTable.
+            name = attributeTable[i]
+            if name is None:
+                raise ValueError, 'bad attribute code'
+        value, pos = decodeString(data, pos)
+        return name, value, pos
+    except Exception, e:
+        raise ProtocolError, 'decodeAttribute: '+str(e)
+
+def encodeString(s):
+    """Encode a string."""
+    # 2-byte big-endian length, the bytes, then a terminating NUL.
+    return struct.pack('>H', len(s)) + s + '\x00'
+
+def encodeResponseHeader(name, value):
+    """Encode a response header/value pair."""
+    lname = name.lower()
+    if lname in responseHeaderTable:
+        # Use table: well-known headers are sent as '\xa0' plus their
+        # 1-byte index into responseHeaderTable.
+        i = responseHeaderTable.index(lname)
+        out = '\xa0' + chr(i)
+    else:
+        # Other headers are sent literally as strings.
+        out = encodeString(name)
+    out += encodeString(value)
+    return out
+
+class Packet(object):
+    """An AJP message packet."""
+    def __init__(self):
+        self.data = ''
+        # Don't set this on write, it will be calculated automatically.
+        self.length = 0
+
+    def _recvall(sock, length):
+        """
+        Attempts to receive length bytes from a socket, blocking if necessary.
+        (Socket may be blocking or non-blocking.)
+
+        Returns (data, bytes_received); bytes_received may be short on EOF.
+        """
+        dataList = []
+        recvLen = 0
+        while length:
+            try:
+                data = sock.recv(length)
+            except socket.error, e:
+                if e[0] == errno.EAGAIN:
+                    # Non-blocking socket not ready; wait for readability.
+                    select.select([sock], [], [])
+                    continue
+                else:
+                    raise
+            if not data: # EOF
+                break
+            dataList.append(data)
+            dataLen = len(data)
+            recvLen += dataLen
+            length -= dataLen
+        return ''.join(dataList), recvLen
+    _recvall = staticmethod(_recvall)
+
+    def read(self, sock):
+        """Attempt to read a packet from the server."""
+        try:
+            header, length = self._recvall(sock, 4)
+        except socket.error:
+            # Treat any sort of socket errors as EOF (close Connection).
+            raise EOFError
+
+        if length < 4:
+            raise EOFError
+
+        # Packets from the web server must begin with '\x12\x34'.
+        if header[:2] != SERVER_PREFIX:
+            raise ProtocolError, 'invalid header'
+
+        # Remaining 2 header bytes are the big-endian payload length.
+        self.length = struct.unpack('>H', header[2:4])[0]
+        if self.length:
+            try:
+                self.data, length = self._recvall(sock, self.length)
+            except socket.error:
+                raise EOFError
+
+            if length < self.length:
+                raise EOFError
+
+    def _sendall(sock, data):
+        """
+        Writes data to a socket and does not return until all the data is sent.
+        """
+        length = len(data)
+        while length:
+            try:
+                sent = sock.send(data)
+            except socket.error, e:
+                if e[0] == errno.EAGAIN:
+                    # Non-blocking socket full; wait for writability.
+                    select.select([], [sock], [])
+                    continue
+                else:
+                    raise
+            data = data[sent:]
+            length -= sent
+    _sendall = staticmethod(_sendall)
+
+    def write(self, sock):
+        """Send a packet to the server."""
+        # Packets to the web server are framed with 'AB' plus a 2-byte
+        # big-endian payload length.
+        self.length = len(self.data)
+        self._sendall(sock, CONTAINER_PREFIX + struct.pack('>H', self.length))
+        if self.length:
+            self._sendall(sock, self.data)
+
+class InputStream(object):
+ """
+ File-like object that represents the request body (if any). Supports
+ the bare minimum methods required by the WSGI spec. Thanks to
+ StringIO for ideas.
+ """
+ def __init__(self, conn):
+ self._conn = conn
+
+ # See WSGIServer.
+ self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
+
+ self._buf = ''
+ self._bufList = []
+ self._pos = 0 # Current read position.
+ self._avail = 0 # Number of bytes currently available.
+ self._length = 0 # Set to Content-Length in request.
+
+ self.logger = logging.getLogger(LoggerName)
+
+ def bytesAvailForAdd(self):
+ return self._length - self._avail
+
+ def _shrinkBuffer(self):
+ """Gets rid of already read data (since we can't rewind)."""
+ if self._pos >= self._shrinkThreshold:
+ self._buf = self._buf[self._pos:]
+ self._avail -= self._pos
+ self._length -= self._pos
+ self._pos = 0
+
+ assert self._avail >= 0 and self._length >= 0
+
+ def _waitForData(self):
+ toAdd = min(self.bytesAvailForAdd(), 0xffff)
+ assert toAdd > 0
+ pkt = Packet()
+ pkt.data = PKTTYPE_GET_BODY + \
+ struct.pack('>H', toAdd)
+ self._conn.writePacket(pkt)
+ self._conn.processInput()
+
+ def read(self, n=-1):
+ if self._pos == self._length:
+ return ''
+ while True:
+ if n < 0 or (self._avail - self._pos) < n:
+ # Not enough data available.
+ if not self.bytesAvailForAdd():
+ # And there's no more coming.
+ newPos = self._avail
+ break
+ else:
+ # Ask for more data and wait.
+ self._waitForData()
+ continue
+ else:
+ newPos = self._pos + n
+ break
+ # Merge buffer list, if necessary.
+ if self._bufList:
+ self._buf += ''.join(self._bufList)
+ self._bufList = []
+ r = self._buf[self._pos:newPos]
+ self._pos = newPos
+ self._shrinkBuffer()
+ return r
+
+ def readline(self, length=None):
+ if self._pos == self._length:
+ return ''
+ while True:
+ # Unfortunately, we need to merge the buffer list early.
+ if self._bufList:
+ self._buf += ''.join(self._bufList)
+ self._bufList = []
+ # Find newline.
+ i = self._buf.find('\n', self._pos)
+ if i < 0:
+ # Not found?
+ if not self.bytesAvailForAdd():
+ # No more data coming.
+ newPos = self._avail
+ break
+ else:
+ if length is not None and len(self._buf) >= length + self._pos:
+ newPos = self._pos + length
+ break
+ # Wait for more to come.
+ self._waitForData()
+ continue
+ else:
+ newPos = i + 1
+ break
+ r = self._buf[self._pos:newPos]
+ self._pos = newPos
+ self._shrinkBuffer()
+ return r
+
+ def readlines(self, sizehint=0):
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ r = self.readline()
+ if not r:
+ raise StopIteration
+ return r
+
+ def setDataLength(self, length):
+ """
+ Once Content-Length is known, Request calls this method to set it.
+ """
+ self._length = length
+
+ def addData(self, data):
+ """
+ Adds data from the server to this InputStream. Note that we never ask
+ the server for data beyond the Content-Length, so the server should
+ never send us an EOF (empty string argument).
+ """
+ if not data:
+ raise ProtocolError, 'short data'
+ self._bufList.append(data)
+ length = len(data)
+ self._avail += length
+ if self._avail > self._length:
+ raise ProtocolError, 'too much data'
+
+class Request(object):
+ """
+ A Request object. A more fitting name would probably be Transaction, but
+ it's named Request to mirror my FastCGI driver. :) This object
+ encapsulates all the data about the HTTP request and allows the handler
+ to send a response.
+
+ The only attributes/methods that the handler should concern itself
+ with are: environ, input, startResponse(), and write().
+ """
+ # Do not ever change the following value.
+ _maxWrite = 8192 - 4 - 3 - 1 # 8k - pkt header - send body header - NUL
+
+ def __init__(self, conn):
+ self._conn = conn
+
+ self.environ = {}
+ self.input = InputStream(conn)
+
+ self._headersSent = False
+
+ self.logger = logging.getLogger(LoggerName)
+
+ def run(self):
+ self.logger.info('%s %s',
+ self.environ['REQUEST_METHOD'],
+ self.environ['REQUEST_URI'])
+
+ start = datetime.datetime.now()
+
+ try:
+ self._conn.server.handler(self)
+ except:
+ self.logger.exception('Exception caught from handler')
+ if not self._headersSent:
+ self._conn.server.error(self)
+
+ end = datetime.datetime.now()
+
+ # Notify server of end of response (reuse flag is set to true).
+ pkt = Packet()
+ pkt.data = PKTTYPE_END_RESPONSE + '\x01'
+ self._conn.writePacket(pkt)
+
+ handlerTime = end - start
+ self.logger.debug('%s %s done (%.3f secs)',
+ self.environ['REQUEST_METHOD'],
+ self.environ['REQUEST_URI'],
+ handlerTime.seconds +
+ handlerTime.microseconds / 1000000.0)
+
+ # The following methods are called from the Connection to set up this
+ # Request.
+
+ def setMethod(self, value):
+ self.environ['REQUEST_METHOD'] = value
+
+ def setProtocol(self, value):
+ self.environ['SERVER_PROTOCOL'] = value
+
+ def setRequestURI(self, value):
+ self.environ['REQUEST_URI'] = value
+
+ def setRemoteAddr(self, value):
+ self.environ['REMOTE_ADDR'] = value
+
+ def setRemoteHost(self, value):
+ self.environ['REMOTE_HOST'] = value
+
+ def setServerName(self, value):
+ self.environ['SERVER_NAME'] = value
+
+ def setServerPort(self, value):
+ self.environ['SERVER_PORT'] = str(value)
+
+ def setIsSSL(self, value):
+ if value:
+ self.environ['HTTPS'] = 'on'
+
+ def addHeader(self, name, value):
+ name = name.replace('-', '_').upper()
+ if name in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
+ self.environ[name] = value
+ if name == 'CONTENT_LENGTH':
+ length = int(value)
+ self.input.setDataLength(length)
+ else:
+ self.environ['HTTP_'+name] = value
+
+ def addAttribute(self, name, value):
+ self.environ[name] = value
+
+ # The only two methods that should be called from the handler.
+
+ def startResponse(self, statusCode, statusMsg, headers):
+ """
+ Begin the HTTP response. This must only be called once and it
+ must be called before any calls to write().
+
+ statusCode is the integer status code (e.g. 200). statusMsg
+ is the associated reason message (e.g.'OK'). headers is a list
+ of 2-tuples - header name/value pairs. (Both header name and value
+ must be strings.)
+ """
+ assert not self._headersSent, 'Headers already sent!'
+
+ pkt = Packet()
+ pkt.data = PKTTYPE_SEND_HEADERS + \
+ struct.pack('>H', statusCode) + \
+ encodeString(statusMsg) + \
+ struct.pack('>H', len(headers)) + \
+ ''.join([encodeResponseHeader(name, value)
+ for name,value in headers])
+
+ self._conn.writePacket(pkt)
+
+ self._headersSent = True
+
+ def write(self, data):
+ """
+ Write data (which comprises the response body). Note that due to
+ restrictions on AJP packet size, we limit our writes to 8185 bytes
+ each packet.
+ """
+ assert self._headersSent, 'Headers must be sent first!'
+
+ bytesLeft = len(data)
+ while bytesLeft:
+ toWrite = min(bytesLeft, self._maxWrite)
+
+ pkt = Packet()
+ pkt.data = PKTTYPE_SEND_BODY + \
+ struct.pack('>H', toWrite) + \
+ data[:toWrite] + '\x00' # Undocumented
+ self._conn.writePacket(pkt)
+
+ data = data[toWrite:]
+ bytesLeft -= toWrite
+
+class Connection(object):
+ """
+ A single Connection with the server. Requests are not multiplexed over the
+ same connection, so at any given time, the Connection is either
+ waiting for a request, or processing a single request.
+ """
+ def __init__(self, sock, addr, server):
+ self.server = server
+ self._sock = sock
+ self._addr = addr
+
+ self._request = None
+
+ self.logger = logging.getLogger(LoggerName)
+
+ def run(self):
+ self.logger.debug('Connection starting up (%s:%d)',
+ self._addr[0], self._addr[1])
+
+ # Main loop. Errors will cause the loop to be exited and
+ # the socket to be closed.
+ while True:
+ try:
+ self.processInput()
+ except ProtocolError, e:
+ self.logger.error("Protocol error '%s'", str(e))
+ break
+ except (EOFError, KeyboardInterrupt):
+ break
+ except:
+ self.logger.exception('Exception caught in Connection')
+ break
+
+ self.logger.debug('Connection shutting down (%s:%d)',
+ self._addr[0], self._addr[1])
+
+ self._sock.close()
+
+ def processInput(self):
+ """Wait for and process a single packet."""
+ pkt = Packet()
+ select.select([self._sock], [], [])
+ pkt.read(self._sock)
+
+ # Body chunks have no packet type code.
+ if self._request is not None:
+ self._processBody(pkt)
+ return
+
+ if not pkt.length:
+ raise ProtocolError, 'unexpected empty packet'
+
+ pkttype = pkt.data[0]
+ if pkttype == PKTTYPE_FWD_REQ:
+ self._forwardRequest(pkt)
+ elif pkttype == PKTTYPE_SHUTDOWN:
+ self._shutdown(pkt)
+ elif pkttype == PKTTYPE_PING:
+ self._ping(pkt)
+ elif pkttype == PKTTYPE_CPING:
+ self._cping(pkt)
+ else:
+ raise ProtocolError, 'unknown packet type'
+
+ def _forwardRequest(self, pkt):
+ """
+ Creates a Request object, fills it in from the packet, then runs it.
+ """
+ assert self._request is None
+
+ req = self.server.requestClass(self)
+ i = ord(pkt.data[1])
+ method = methodTable[i]
+ if method is None:
+ raise ValueError, 'bad method field'
+ req.setMethod(method)
+ value, pos = decodeString(pkt.data, 2)
+ req.setProtocol(value)
+ value, pos = decodeString(pkt.data, pos)
+ req.setRequestURI(value)
+ value, pos = decodeString(pkt.data, pos)
+ req.setRemoteAddr(value)
+ value, pos = decodeString(pkt.data, pos)
+ req.setRemoteHost(value)
+ value, pos = decodeString(pkt.data, pos)
+ req.setServerName(value)
+ value = struct.unpack('>H', pkt.data[pos:pos+2])[0]
+ req.setServerPort(value)
+ i = ord(pkt.data[pos+2])
+ req.setIsSSL(i != 0)
+
+ # Request headers.
+ numHeaders = struct.unpack('>H', pkt.data[pos+3:pos+5])[0]
+ pos += 5
+ for i in range(numHeaders):
+ name, value, pos = decodeRequestHeader(pkt.data, pos)
+ req.addHeader(name, value)
+
+ # Attributes.
+ while True:
+ name, value, pos = decodeAttribute(pkt.data, pos)
+ if name is None:
+ break
+ req.addAttribute(name, value)
+
+ self._request = req
+
+ # Read first body chunk, if needed.
+ if req.input.bytesAvailForAdd():
+ self.processInput()
+
+ # Run Request.
+ req.run()
+
+ self._request = None
+
+ def _shutdown(self, pkt):
+ """Not sure what to do with this yet."""
+ self.logger.info('Received shutdown request from server')
+
+ def _ping(self, pkt):
+ """I have no idea what this packet means."""
+ self.logger.debug('Received ping')
+
+ def _cping(self, pkt):
+ """Respond to a PING (CPING) packet."""
+ self.logger.debug('Received PING, sending PONG')
+ pkt = Packet()
+ pkt.data = PKTTYPE_CPONG
+ self.writePacket(pkt)
+
+ def _processBody(self, pkt):
+ """
+ Handles a body chunk from the server by appending it to the
+ InputStream.
+ """
+ if pkt.length:
+ length = struct.unpack('>H', pkt.data[:2])[0]
+ self._request.input.addData(pkt.data[2:2+length])
+ else:
+ # Shouldn't really ever get here.
+ self._request.input.addData('')
+
+ def writePacket(self, pkt):
+ """Sends a Packet to the server."""
+ pkt.write(self._sock)
+
+class BaseAJPServer(object):
+ # What Request class to use.
+ requestClass = Request
+
+ # Limits the size of the InputStream's string buffer to this size + 8k.
+ # Since the InputStream is not seekable, we throw away already-read
+ # data once this certain amount has been read. (The 8k is there because
+ # it is the maximum size of new data added per chunk.)
+ inputStreamShrinkThreshold = 102400 - 8192
+
+ def __init__(self, application, scriptName='', environ=None,
+ multithreaded=True, multiprocess=False,
+ bindAddress=('localhost', 8009), allowedServers=NoDefault,
+ loggingLevel=logging.INFO, debug=True):
+ """
+ scriptName is the initial portion of the URL path that "belongs"
+ to your application. It is used to determine PATH_INFO (which doesn't
+ seem to be passed in). An empty scriptName means your application
+ is mounted at the root of your virtual host.
+
+ environ, which must be a dictionary, can contain any additional
+ environment variables you want to pass to your application.
+
+ Set multithreaded to False if your application is not thread-safe.
+
+ Set multiprocess to True to explicitly set wsgi.multiprocess to
+ True. (Only makes sense with threaded servers.)
+
+ bindAddress is the address to bind to, which must be a tuple of
+ length 2. The first element is a string, which is the host name
+ or IPv4 address of a local interface. The 2nd element is the port
+ number.
+
+ allowedServers must be None or a list of strings representing the
+ IPv4 addresses of servers allowed to connect. None means accept
+ connections from anywhere. By default, it is a list containing
+ the single item '127.0.0.1'.
+
+ loggingLevel sets the logging level of the module-level logger.
+ """
+ if environ is None:
+ environ = {}
+
+ self.application = application
+ self.scriptName = scriptName
+ self.environ = environ
+ self.multithreaded = multithreaded
+ self.multiprocess = multiprocess
+ self.debug = debug
+ self._bindAddress = bindAddress
+ if allowedServers is NoDefault:
+ allowedServers = ['127.0.0.1']
+ self._allowedServers = allowedServers
+
+ # Used to force single-threadedness.
+ self._appLock = thread.allocate_lock()
+
+ self.logger = logging.getLogger(LoggerName)
+ self.logger.setLevel(loggingLevel)
+
+ def _setupSocket(self):
+ """Creates and binds the socket for communication with the server."""
+ sock = socket.socket()
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(self._bindAddress)
+ sock.listen(socket.SOMAXCONN)
+ return sock
+
+ def _cleanupSocket(self, sock):
+ """Closes the main socket."""
+ sock.close()
+
+ def _isClientAllowed(self, addr):
+ ret = self._allowedServers is None or addr[0] in self._allowedServers
+ if not ret:
+ self.logger.warning('Server connection from %s disallowed',
+ addr[0])
+ return ret
+
+ def handler(self, request):
+ """
+ WSGI handler. Sets up WSGI environment, calls the application,
+ and sends the application's response.
+ """
+ environ = request.environ
+ environ.update(self.environ)
+
+ environ['wsgi.version'] = (1,0)
+ environ['wsgi.input'] = request.input
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.multithread'] = self.multithreaded
+ environ['wsgi.multiprocess'] = self.multiprocess
+ environ['wsgi.run_once'] = False
+
+ if environ.get('HTTPS', 'off') in ('on', '1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ self._sanitizeEnv(environ)
+
+ headers_set = []
+ headers_sent = []
+ result = None
+
+ def write(data):
+ assert type(data) is str, 'write() argument must be string'
+ assert headers_set, 'write() before start_response()'
+
+ if not headers_sent:
+ status, responseHeaders = headers_sent[:] = headers_set
+ statusCode = int(status[:3])
+ statusMsg = status[4:]
+ found = False
+ for header,value in responseHeaders:
+ if header.lower() == 'content-length':
+ found = True
+ break
+ if not found and result is not None:
+ try:
+ if len(result) == 1:
+ responseHeaders.append(('Content-Length',
+ str(len(data))))
+ except:
+ pass
+ request.startResponse(statusCode, statusMsg, responseHeaders)
+
+ request.write(data)
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise if too late
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ else:
+ assert not headers_set, 'Headers already set!'
+
+ assert type(status) is str, 'Status must be a string'
+ assert len(status) >= 4, 'Status must be at least 4 characters'
+ assert int(status[:3]), 'Status must begin with 3-digit code'
+ assert status[3] == ' ', 'Status must have a space after code'
+ assert type(response_headers) is list, 'Headers must be a list'
+ if __debug__:
+ for name,val in response_headers:
+ assert type(name) is str, 'Header name "%s" must be a string' % name
+ assert type(val) is str, 'Value of header "%s" must be a string' % name
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ if not self.multithreaded:
+ self._appLock.acquire()
+ try:
+ try:
+ result = self.application(environ, start_response)
+ try:
+ for data in result:
+ if data:
+ write(data)
+ if not headers_sent:
+ write('') # in case body was empty
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except socket.error, e:
+ if e[0] != errno.EPIPE:
+ raise # Don't let EPIPE propagate beyond server
+ finally:
+ if not self.multithreaded:
+ self._appLock.release()
+
+ def _sanitizeEnv(self, environ):
+ """Fill-in/deduce missing values in environ."""
+ # Namely SCRIPT_NAME/PATH_INFO
+ value = environ['REQUEST_URI']
+ scriptName = environ.get('WSGI_SCRIPT_NAME', self.scriptName)
+ if not value.startswith(scriptName):
+ self.logger.warning('scriptName does not match request URI')
+
+ environ['PATH_INFO'] = value[len(scriptName):]
+ environ['SCRIPT_NAME'] = scriptName
+
+ reqUri = None
+ if environ.has_key('REQUEST_URI'):
+ reqUri = environ['REQUEST_URI'].split('?', 1)
+
+ if not environ.has_key('QUERY_STRING') or not environ['QUERY_STRING']:
+ if reqUri is not None and len(reqUri) > 1:
+ environ['QUERY_STRING'] = reqUri[1]
+ else:
+ environ['QUERY_STRING'] = ''
+
+ def error(self, request):
+ """
+ Override to provide custom error handling. Ideally, however,
+ all errors should be caught at the application level.
+ """
+ if self.debug:
+ request.startResponse(200, 'OK', [('Content-Type', 'text/html')])
+ import cgitb
+ request.write(cgitb.html(sys.exc_info()))
+ else:
+ errorpage = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html><head>
+<title>Unhandled Exception</title>
+</head><body>
+<h1>Unhandled Exception</h1>
+<p>An unhandled exception was thrown by the application.</p>
+</body></html>"""
+ request.startResponse(200, 'OK', [('Content-Type', 'text/html')])
+ request.write(errorpage)
diff --git a/lib/nulib/python/nulib/ext/flup/server/ajp_fork.py b/lib/nulib/python/nulib/ext/flup/server/ajp_fork.py
new file mode 100644
index 0000000..111b29c
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/ajp_fork.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+ajp - an AJP 1.3/WSGI gateway.
+
+For more information about AJP and AJP connectors for your web server, see
+<http://tomcat.apache.org/connectors-doc/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/dev/peps/pep-0333/>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ import sys
+ from myapplication import app # Assume app is your WSGI application object
+ from ajp import WSGIServer
+ ret = WSGIServer(app).run()
+ sys.exit(ret and 42 or 0)
+
+See the documentation for WSGIServer for more information.
+
+About the bit of logic at the end:
+Upon receiving SIGHUP, the python script will exit with status code 42. This
+can be used by a wrapper script to determine if the python script should be
+re-run. When a SIGINT or SIGTERM is received, the script exits with status
+code 0, possibly indicating a normal exit.
+
+Example wrapper script:
+
+ #!/bin/sh
+ STATUS=42
+ while test $STATUS -eq 42; do
+ python "$@" that_script_above.py
+ STATUS=$?
+ done
+
+Example workers.properties (for mod_jk):
+
+ worker.list=foo
+ worker.foo.port=8009
+ worker.foo.host=localhost
+ worker.foo.type=ajp13
+
+Example httpd.conf (for mod_jk):
+
+ JkWorkersFile /path/to/workers.properties
+ JkMount /* foo
+
+Note that if you mount your ajp application anywhere but the root ("/"), you
+SHOULD specify scriptName to the WSGIServer constructor. This will ensure
+that SCRIPT_NAME/PATH_INFO are correctly deduced.
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import socket
+import logging
+
+from flup.server.ajp_base import BaseAJPServer, Connection
+from flup.server.preforkserver import PreforkServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseAJPServer, PreforkServer):
+ """
+ AJP1.3/WSGI server. Runs your WSGI application as a persistent program
+ that understands AJP1.3. Opens up a TCP socket, binds it, and then
+ waits for forwarded requests from your webserver.
+
+ Why AJP? Two good reasons are that AJP provides load-balancing and
+ fail-over support. Personally, I just wanted something new to
+ implement. :)
+
+ Of course you will need an AJP1.3 connector for your webserver (e.g.
+ mod_jk) - see <http://tomcat.apache.org/connectors-doc/>.
+ """
+ def __init__(self, application, scriptName='', environ=None,
+ bindAddress=('localhost', 8009), allowedServers=None,
+ loggingLevel=logging.INFO, debug=True, **kw):
+ """
+ scriptName is the initial portion of the URL path that "belongs"
+ to your application. It is used to determine PATH_INFO (which doesn't
+ seem to be passed in). An empty scriptName means your application
+ is mounted at the root of your virtual host.
+
+ environ, which must be a dictionary, can contain any additional
+ environment variables you want to pass to your application.
+
+ bindAddress is the address to bind to, which must be a tuple of
+ length 2. The first element is a string, which is the host name
+ or IPv4 address of a local interface. The 2nd element is the port
+ number.
+
+ allowedServers must be None or a list of strings representing the
+ IPv4 addresses of servers allowed to connect. None means accept
+ connections from anywhere.
+
+ loggingLevel sets the logging level of the module-level logger.
+ """
+ BaseAJPServer.__init__(self, application,
+ scriptName=scriptName,
+ environ=environ,
+ multithreaded=False,
+ multiprocess=True,
+ bindAddress=bindAddress,
+ allowedServers=allowedServers,
+ loggingLevel=loggingLevel,
+ debug=debug)
+ for key in ('multithreaded', 'multiprocess', 'jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ PreforkServer.__init__(self, jobClass=Connection, jobArgs=(self,), **kw)
+
+ def run(self):
+ """
+ Main loop. Call this after instantiating WSGIServer. SIGHUP, SIGINT,
+ SIGQUIT, SIGTERM cause it to cleanup and return. (If a SIGHUP
+ is caught, this method returns True. Returns False otherwise.)
+ """
+ self.logger.info('%s starting up', self.__class__.__name__)
+
+ try:
+ sock = self._setupSocket()
+ except socket.error, e:
+ self.logger.error('Failed to bind socket (%s), exiting', e[1])
+ return False
+
+ ret = PreforkServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ self.logger.info('%s shutting down%s', self.__class__.__name__,
+ self._hupReceived and ' (reload requested)' or '')
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+ yield '<html><head><title>Hello World!</title></head>\n' \
+ '<body>\n' \
+ '<p>Hello World!</p>\n' \
+ '<table border="1">'
+ names = environ.keys()
+ names.sort()
+ for name in names:
+ yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+ name, cgi.escape(`environ[name]`))
+
+ form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+ keep_blank_values=1)
+ if form.list:
+ yield '<tr><th colspan="2">Form data</th></tr>'
+
+ for field in form.list:
+ yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+ field.name, field.value)
+
+ yield '</table>\n' \
+ '</body></html>\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ # Explicitly set bindAddress to *:8009 for testing.
+ WSGIServer(test_app,
+ bindAddress=('', 8009), allowedServers=None,
+ loggingLevel=logging.DEBUG).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/cgi.py b/lib/nulib/python/nulib/ext/flup/server/cgi.py
new file mode 100644
index 0000000..17cc3ca
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/cgi.py
@@ -0,0 +1,71 @@
+# Taken from <http://www.python.org/dev/peps/pep-0333/>
+# which was placed in the public domain.
+
+import os, sys
+
+
+__all__ = ['WSGIServer']
+
+
+class WSGIServer(object):
+
+ def __init__(self, application):
+ self.application = application
+
+ def run(self):
+
+ environ = dict(os.environ.items())
+ environ['wsgi.input'] = sys.stdin
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.version'] = (1,0)
+ environ['wsgi.multithread'] = False
+ environ['wsgi.multiprocess'] = True
+ environ['wsgi.run_once'] = True
+
+ if environ.get('HTTPS','off') in ('on','1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ headers_set = []
+ headers_sent = []
+
+ def write(data):
+ if not headers_set:
+ raise AssertionError("write() before start_response()")
+
+ elif not headers_sent:
+ # Before the first output, send the stored headers
+ status, response_headers = headers_sent[:] = headers_set
+ sys.stdout.write('Status: %s\r\n' % status)
+ for header in response_headers:
+ sys.stdout.write('%s: %s\r\n' % header)
+ sys.stdout.write('\r\n')
+
+ sys.stdout.write(data)
+ sys.stdout.flush()
+
+ def start_response(status,response_headers,exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise original exception if headers sent
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ elif headers_set:
+ raise AssertionError("Headers already set!")
+
+ headers_set[:] = [status,response_headers]
+ return write
+
+ result = self.application(environ, start_response)
+ try:
+ for data in result:
+ if data: # don't send headers until body appears
+ write(data)
+ if not headers_sent:
+ write('') # send headers now if body was empty
+ finally:
+ if hasattr(result,'close'):
+ result.close()
diff --git a/lib/nulib/python/nulib/ext/flup/server/fcgi.py b/lib/nulib/python/nulib/ext/flup/server/fcgi.py
new file mode 100644
index 0000000..ab160e9
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/fcgi.py
@@ -0,0 +1,149 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+fcgi - a FastCGI/WSGI gateway.
+
+For more information about FastCGI, see <http://www.fastcgi.com/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/dev/peps/pep-0333/>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ from myapplication import app # Assume app is your WSGI application object
+ from fcgi import WSGIServer
+ WSGIServer(app).run()
+
+See the documentation for WSGIServer for more information.
+
+On most platforms, fcgi will fallback to regular CGI behavior if run in a
+non-FastCGI context. If you want to force CGI behavior, set the environment
+variable FCGI_FORCE_CGI to "Y" or "y".
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import os
+
+from flup.server.fcgi_base import BaseFCGIServer, FCGI_RESPONDER
+from flup.server.threadedserver import ThreadedServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseFCGIServer, ThreadedServer):
+ """
+ FastCGI server that supports the Web Server Gateway Interface. See
+ <http://www.python.org/dev/peps/pep-0333/>.
+ """
+ def __init__(self, application, environ=None,
+ multithreaded=True, multiprocess=False,
+ bindAddress=None, umask=None, multiplexed=False,
+ debug=True, roles=(FCGI_RESPONDER,), forceCGI=False, **kw):
+ """
+ environ, if present, must be a dictionary-like object. Its
+ contents will be copied into application's environ. Useful
+ for passing application-specific variables.
+
+ bindAddress, if present, must either be a string or a 2-tuple. If
+ present, run() will open its own listening socket. You would use
+ this if you wanted to run your application as an 'external' FastCGI
+ app. (i.e. the webserver would no longer be responsible for starting
+ your app) If a string, it will be interpreted as a filename and a UNIX
+ socket will be opened. If a tuple, the first element, a string,
+ is the interface name/IP to bind to, and the second element (an int)
+ is the port number.
+ """
+ BaseFCGIServer.__init__(self, application,
+ environ=environ,
+ multithreaded=multithreaded,
+ multiprocess=multiprocess,
+ bindAddress=bindAddress,
+ umask=umask,
+ multiplexed=multiplexed,
+ debug=debug,
+ roles=roles,
+ forceCGI=forceCGI)
+ for key in ('jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ ThreadedServer.__init__(self, jobClass=self._connectionClass,
+ jobArgs=(self,), **kw)
+
+ def _isClientAllowed(self, addr):
+ return self._web_server_addrs is None or \
+ (len(addr) == 2 and addr[0] in self._web_server_addrs)
+
+ def run(self):
+ """
+ The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
+ SIGHUP was received, False otherwise.
+ """
+ self._web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
+ if self._web_server_addrs is not None:
+ self._web_server_addrs = map(lambda x: x.strip(),
+ self._web_server_addrs.split(','))
+
+ sock = self._setupSocket()
+
+ ret = ThreadedServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+ yield '<html><head><title>Hello World!</title></head>\n' \
+ '<body>\n' \
+ '<p>Hello World!</p>\n' \
+ '<table border="1">'
+ names = environ.keys()
+ names.sort()
+ for name in names:
+ yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+ name, cgi.escape(`environ[name]`))
+
+ form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+ keep_blank_values=1)
+ if form.list:
+ yield '<tr><th colspan="2">Form data</th></tr>'
+
+ for field in form.list:
+ yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+ field.name, field.value)
+
+ yield '</table>\n' \
+ '</body></html>\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ WSGIServer(test_app).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/fcgi_base.py b/lib/nulib/python/nulib/ext/flup/server/fcgi_base.py
new file mode 100644
index 0000000..31e0ac6
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/fcgi_base.py
@@ -0,0 +1,1188 @@
+# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import sys
+import os
+import signal
+import struct
+import cStringIO as StringIO
+import select
+import socket
+import errno
+import traceback
+
+try:
+ import thread
+ import threading
+ thread_available = True
+except ImportError:
+ import dummy_thread as thread
+ import dummy_threading as threading
+ thread_available = False
+
+# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case.
+if not hasattr(socket, 'SHUT_WR'):
+ socket.SHUT_WR = 1
+
+__all__ = ['BaseFCGIServer']
+
+# Constants from the spec.
+FCGI_LISTENSOCK_FILENO = 0
+
+FCGI_HEADER_LEN = 8
+
+FCGI_VERSION_1 = 1
+
+FCGI_BEGIN_REQUEST = 1
+FCGI_ABORT_REQUEST = 2
+FCGI_END_REQUEST = 3
+FCGI_PARAMS = 4
+FCGI_STDIN = 5
+FCGI_STDOUT = 6
+FCGI_STDERR = 7
+FCGI_DATA = 8
+FCGI_GET_VALUES = 9
+FCGI_GET_VALUES_RESULT = 10
+FCGI_UNKNOWN_TYPE = 11
+FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE
+
+FCGI_NULL_REQUEST_ID = 0
+
+FCGI_KEEP_CONN = 1
+
+FCGI_RESPONDER = 1
+FCGI_AUTHORIZER = 2
+FCGI_FILTER = 3
+
+FCGI_REQUEST_COMPLETE = 0
+FCGI_CANT_MPX_CONN = 1
+FCGI_OVERLOADED = 2
+FCGI_UNKNOWN_ROLE = 3
+
+FCGI_MAX_CONNS = 'FCGI_MAX_CONNS'
+FCGI_MAX_REQS = 'FCGI_MAX_REQS'
+FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS'
+
+FCGI_Header = '!BBHHBx'
+FCGI_BeginRequestBody = '!HB5x'
+FCGI_EndRequestBody = '!LB3x'
+FCGI_UnknownTypeBody = '!B7x'
+
+FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody)
+FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody)
+
+if __debug__:
+    import time
+
+    # Set non-zero to write debug output to a file.
+    DEBUG = 0
+    DEBUGLOG = '/tmp/fcgi.log'
+
+    def _debug(level, msg):
+        # Best-effort logging: append msg to DEBUGLOG when its level is at
+        # or below the configured DEBUG verbosity; any I/O failure is
+        # silently ignored so logging can never take the server down.
+        if DEBUG < level:
+            return
+
+        try:
+            f = open(DEBUGLOG, 'a')
+            # time.ctime()[4:-4] trims weekday and year, e.g. 'Jun 12 10:00:00 '.
+            f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg))
+            f.close()
+        except:
+            pass
+
+class InputStream(object):
+ """
+ File-like object representing FastCGI input streams (FCGI_STDIN and
+ FCGI_DATA). Supports the minimum methods required by WSGI spec.
+ """
+ def __init__(self, conn):
+ self._conn = conn
+
+ # See Server.
+ self._shrinkThreshold = conn.server.inputStreamShrinkThreshold
+
+ self._buf = ''
+ self._bufList = []
+ self._pos = 0 # Current read position.
+ self._avail = 0 # Number of bytes currently available.
+
+ self._eof = False # True when server has sent EOF notification.
+
+ def _shrinkBuffer(self):
+ """Gets rid of already read data (since we can't rewind)."""
+ if self._pos >= self._shrinkThreshold:
+ self._buf = self._buf[self._pos:]
+ self._avail -= self._pos
+ self._pos = 0
+
+ assert self._avail >= 0
+
+ def _waitForData(self):
+ """Waits for more data to become available."""
+ self._conn.process_input()
+
+ def read(self, n=-1):
+ if self._pos == self._avail and self._eof:
+ return ''
+ while True:
+ if n < 0 or (self._avail - self._pos) < n:
+ # Not enough data available.
+ if self._eof:
+ # And there's no more coming.
+ newPos = self._avail
+ break
+ else:
+ # Wait for more data.
+ self._waitForData()
+ continue
+ else:
+ newPos = self._pos + n
+ break
+ # Merge buffer list, if necessary.
+ if self._bufList:
+ self._buf += ''.join(self._bufList)
+ self._bufList = []
+ r = self._buf[self._pos:newPos]
+ self._pos = newPos
+ self._shrinkBuffer()
+ return r
+
+ def readline(self, length=None):
+ if self._pos == self._avail and self._eof:
+ return ''
+ while True:
+ # Unfortunately, we need to merge the buffer list early.
+ if self._bufList:
+ self._buf += ''.join(self._bufList)
+ self._bufList = []
+ # Find newline.
+ i = self._buf.find('\n', self._pos)
+ if i < 0:
+ # Not found?
+ if self._eof:
+ # No more data coming.
+ newPos = self._avail
+ break
+ else:
+ if length is not None and len(self._buf) >= length + self._pos:
+ newPos = self._pos + length
+ break
+ # Wait for more to come.
+ self._waitForData()
+ continue
+ else:
+ newPos = i + 1
+ break
+ r = self._buf[self._pos:newPos]
+ self._pos = newPos
+ self._shrinkBuffer()
+ return r
+
+ def readlines(self, sizehint=0):
+ total = 0
+ lines = []
+ line = self.readline()
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline()
+ return lines
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ r = self.readline()
+ if not r:
+ raise StopIteration
+ return r
+
+ def add_data(self, data):
+ if not data:
+ self._eof = True
+ else:
+ self._bufList.append(data)
+ self._avail += len(data)
+
+class MultiplexedInputStream(InputStream):
+ """
+ A version of InputStream meant to be used with MultiplexedConnections.
+ Assumes the MultiplexedConnection (the producer) and the Request
+ (the consumer) are running in different threads.
+ """
+ def __init__(self, conn):
+ super(MultiplexedInputStream, self).__init__(conn)
+
+ # Arbitrates access to this InputStream (it's used simultaneously
+ # by a Request and its owning Connection object).
+ lock = threading.RLock()
+
+ # Notifies Request thread that there is new data available.
+ self._lock = threading.Condition(lock)
+
+ def _waitForData(self):
+ # Wait for notification from add_data().
+ self._lock.wait()
+
+ def read(self, n=-1):
+ self._lock.acquire()
+ try:
+ return super(MultiplexedInputStream, self).read(n)
+ finally:
+ self._lock.release()
+
+ def readline(self, length=None):
+ self._lock.acquire()
+ try:
+ return super(MultiplexedInputStream, self).readline(length)
+ finally:
+ self._lock.release()
+
+ def add_data(self, data):
+ self._lock.acquire()
+ try:
+ super(MultiplexedInputStream, self).add_data(data)
+ self._lock.notify()
+ finally:
+ self._lock.release()
+
+class OutputStream(object):
+ """
+ FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). By default, calls to
+ write() or writelines() immediately result in Records being sent back
+ to the server. Buffering should be done in a higher level!
+ """
+ def __init__(self, conn, req, type, buffered=False):
+ self._conn = conn
+ self._req = req
+ self._type = type
+ self._buffered = buffered
+ self._bufList = [] # Used if buffered is True
+ self.dataWritten = False
+ self.closed = False
+
+ def _write(self, data):
+ length = len(data)
+ while length:
+ toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN)
+
+ rec = Record(self._type, self._req.requestId)
+ rec.contentLength = toWrite
+ rec.contentData = data[:toWrite]
+ self._conn.writeRecord(rec)
+
+ data = data[toWrite:]
+ length -= toWrite
+
+ def write(self, data):
+ assert not self.closed
+
+ if not data:
+ return
+
+ self.dataWritten = True
+
+ if self._buffered:
+ self._bufList.append(data)
+ else:
+ self._write(data)
+
+ def writelines(self, lines):
+ assert not self.closed
+
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ # Only need to flush if this OutputStream is actually buffered.
+ if self._buffered:
+ data = ''.join(self._bufList)
+ self._bufList = []
+ self._write(data)
+
+ # Though available, the following should NOT be called by WSGI apps.
+ def close(self):
+ """Sends end-of-stream notification, if necessary."""
+ if not self.closed and self.dataWritten:
+ self.flush()
+ rec = Record(self._type, self._req.requestId)
+ self._conn.writeRecord(rec)
+ self.closed = True
+
+class TeeOutputStream(object):
+    """
+    Simple wrapper around two or more output file-like objects that copies
+    written data to all streams.
+    """
+    def __init__(self, streamList):
+        # Sequence of file-like objects; every write/flush is fanned out to
+        # each of them, in order.
+        self._streamList = streamList
+
+    def write(self, data):
+        for f in self._streamList:
+            f.write(data)
+
+    def writelines(self, lines):
+        # Delegates to write() line by line (no newlines are added).
+        for line in lines:
+            self.write(line)
+
+    def flush(self):
+        for f in self._streamList:
+            f.flush()
+
+class StdoutWrapper(object):
+    """
+    Wrapper for sys.stdout so we know if data has actually been written.
+    """
+    def __init__(self, stdout):
+        self._file = stdout
+        # Becomes True after the first non-empty write; Request.run()
+        # inspects stdout.dataWritten to decide whether an error page may
+        # still be emitted.
+        self.dataWritten = False
+
+    def write(self, data):
+        # Only a non-empty write counts as "data written".
+        if data:
+            self.dataWritten = True
+        self._file.write(data)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def __getattr__(self, name):
+        # Everything else (flush, close, ...) is proxied to the wrapped file.
+        return getattr(self._file, name)
+
+def decode_pair(s, pos=0):
+    """
+    Decodes a name/value pair.
+
+    The number of bytes decoded as well as the name/value pair
+    are returned.
+    """
+    # FastCGI name-value pair encoding: each length is a single byte unless
+    # its high bit is set, in which case it is a 4-byte big-endian integer
+    # with the high bit masked off.
+    nameLength = ord(s[pos])
+    if nameLength & 128:
+        nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
+        pos += 4
+    else:
+        pos += 1
+
+    valueLength = ord(s[pos])
+    if valueLength & 128:
+        valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff
+        pos += 4
+    else:
+        pos += 1
+
+    # The name bytes immediately follow the two lengths, then the value.
+    name = s[pos:pos+nameLength]
+    pos += nameLength
+    value = s[pos:pos+valueLength]
+    pos += valueLength
+
+    # pos is now the offset of the next pair (callers loop on it).
+    return (pos, (name, value))
+
+def encode_pair(name, value):
+    """
+    Encodes a name/value pair.
+
+    The encoded string is returned.
+    """
+    # Inverse of decode_pair: lengths under 128 fit in one byte; longer
+    # lengths are written as 4 bytes big-endian with the high bit set.
+    nameLength = len(name)
+    if nameLength < 128:
+        s = chr(nameLength)
+    else:
+        s = struct.pack('!L', nameLength | 0x80000000L)
+
+    valueLength = len(value)
+    if valueLength < 128:
+        s += chr(valueLength)
+    else:
+        s += struct.pack('!L', valueLength | 0x80000000L)
+
+    return s + name + value
+
+class Record(object):
+ """
+ A FastCGI Record.
+
+ Used for encoding/decoding records.
+ """
+ def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID):
+ self.version = FCGI_VERSION_1
+ self.type = type
+ self.requestId = requestId
+ self.contentLength = 0
+ self.paddingLength = 0
+ self.contentData = ''
+
+ def _recvall(sock, length):
+ """
+ Attempts to receive length bytes from a socket, blocking if necessary.
+ (Socket may be blocking or non-blocking.)
+ """
+ dataList = []
+ recvLen = 0
+ while length:
+ try:
+ data = sock.recv(length)
+ except socket.error, e:
+ if e[0] == errno.EAGAIN:
+ select.select([sock], [], [])
+ continue
+ else:
+ raise
+ if not data: # EOF
+ break
+ dataList.append(data)
+ dataLen = len(data)
+ recvLen += dataLen
+ length -= dataLen
+ return ''.join(dataList), recvLen
+ _recvall = staticmethod(_recvall)
+
+ def read(self, sock):
+ """Read and decode a Record from a socket."""
+ try:
+ header, length = self._recvall(sock, FCGI_HEADER_LEN)
+ except:
+ raise EOFError
+
+ if length < FCGI_HEADER_LEN:
+ raise EOFError
+
+ self.version, self.type, self.requestId, self.contentLength, \
+ self.paddingLength = struct.unpack(FCGI_Header, header)
+
+ if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, '
+ 'contentLength = %d' %
+ (sock.fileno(), self.type, self.requestId,
+ self.contentLength))
+
+ if self.contentLength:
+ try:
+ self.contentData, length = self._recvall(sock,
+ self.contentLength)
+ except:
+ raise EOFError
+
+ if length < self.contentLength:
+ raise EOFError
+
+ if self.paddingLength:
+ try:
+ self._recvall(sock, self.paddingLength)
+ except:
+ raise EOFError
+
+ def _sendall(sock, data):
+ """
+ Writes data to a socket and does not return until all the data is sent.
+ """
+ length = len(data)
+ while length:
+ try:
+ sent = sock.send(data)
+ except socket.error, e:
+ if e[0] == errno.EAGAIN:
+ select.select([], [sock], [])
+ continue
+ else:
+ raise
+ data = data[sent:]
+ length -= sent
+ _sendall = staticmethod(_sendall)
+
+ def write(self, sock):
+ """Encode and write a Record to a socket."""
+ self.paddingLength = -self.contentLength & 7
+
+ if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, '
+ 'contentLength = %d' %
+ (sock.fileno(), self.type, self.requestId,
+ self.contentLength))
+
+ header = struct.pack(FCGI_Header, self.version, self.type,
+ self.requestId, self.contentLength,
+ self.paddingLength)
+ self._sendall(sock, header)
+ if self.contentLength:
+ self._sendall(sock, self.contentData)
+ if self.paddingLength:
+ self._sendall(sock, '\x00'*self.paddingLength)
+
+class Request(object):
+    """
+    Represents a single FastCGI request.
+
+    These objects are passed to your handler and are the main interface
+    between your handler and the fcgi module. The methods should not
+    be called by your handler. However, server, params, stdin, stdout,
+    stderr, and data are free for your handler's use.
+    """
+    def __init__(self, conn, inputStreamClass):
+        self._conn = conn
+
+        self.server = conn.server
+        self.params = {}
+        # stdin/data use the Connection's stream class (plain or
+        # multiplexed); stderr is buffered so tracebacks arrive whole.
+        self.stdin = inputStreamClass(conn)
+        self.stdout = OutputStream(conn, self, FCGI_STDOUT)
+        self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True)
+        self.data = inputStreamClass(conn)
+
+    def run(self):
+        """Runs the handler, flushes the streams, and ends the request."""
+        try:
+            protocolStatus, appStatus = self.server.handler(self)
+        except:
+            # Handler blew up: dump the traceback to FCGI_STDERR and, if
+            # nothing was sent to the client yet, let the server emit its
+            # error page. The request still completes normally.
+            traceback.print_exc(file=self.stderr)
+            self.stderr.flush()
+            if not self.stdout.dataWritten:
+                self.server.error(self)
+
+            protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0
+
+        if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' %
+                             (protocolStatus, appStatus))
+
+        try:
+            self._flush()
+            self._end(appStatus, protocolStatus)
+        except socket.error, e:
+            # EPIPE just means the web server hung up; anything else is real.
+            if e[0] != errno.EPIPE:
+                raise
+
+    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
+        # Sends FCGI_END_REQUEST via the owning Connection.
+        self._conn.end_request(self, appStatus, protocolStatus)
+
+    def _flush(self):
+        # Closing the output streams sends their end-of-stream records.
+        self.stdout.close()
+        self.stderr.close()
+
+class CGIRequest(Request):
+    """A normal CGI request disguised as a FastCGI request."""
+    def __init__(self, server):
+        # These are normally filled in by Connection.
+        self.requestId = 1
+        self.role = FCGI_RESPONDER
+        self.flags = 0
+        self.aborted = False
+
+        self.server = server
+        # In CGI mode, params come straight from the process environment and
+        # the streams are the process's own stdio.
+        self.params = dict(os.environ)
+        self.stdin = sys.stdin
+        # Wrapped so dataWritten tracking (used by Request.run) still works.
+        self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity!
+        self.stderr = sys.stderr
+        self.data = StringIO.StringIO()
+
+    def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE):
+        # Plain CGI serves exactly one request, so ending it ends the process.
+        sys.exit(appStatus)
+
+    def _flush(self):
+        # Not buffered, do nothing.
+        pass
+
+class Connection(object):
+ """
+ A Connection with the web server.
+
+ Each Connection is associated with a single socket (which is
+ connected to the web server) and is responsible for handling all
+ the FastCGI message processing for that socket.
+ """
+ _multiplexed = False
+ _inputStreamClass = InputStream
+
+ def __init__(self, sock, addr, server):
+ self._sock = sock
+ self._addr = addr
+ self.server = server
+
+ # Active Requests for this Connection, mapped by request ID.
+ self._requests = {}
+
+ def _cleanupSocket(self):
+ """Close the Connection's socket."""
+ try:
+ self._sock.shutdown(socket.SHUT_WR)
+ except:
+ return
+ try:
+ while True:
+ r, w, e = select.select([self._sock], [], [])
+ if not r or not self._sock.recv(1024):
+ break
+ except:
+ pass
+ self._sock.close()
+
+ def run(self):
+ """Begin processing data from the socket."""
+ self._keepGoing = True
+ while self._keepGoing:
+ try:
+ self.process_input()
+ except (EOFError, KeyboardInterrupt):
+ break
+ except (select.error, socket.error), e:
+ if e[0] == errno.EBADF: # Socket was closed by Request.
+ break
+ raise
+
+ self._cleanupSocket()
+
+ def process_input(self):
+ """Attempt to read a single Record from the socket and process it."""
+ # Currently, any children Request threads notify this Connection
+ # that it is no longer needed by closing the Connection's socket.
+ # We need to put a timeout on select, otherwise we might get
+ # stuck in it indefinitely... (I don't like this solution.)
+ while self._keepGoing:
+ try:
+ r, w, e = select.select([self._sock], [], [], 1.0)
+ except ValueError:
+ # Sigh. ValueError gets thrown sometimes when passing select
+ # a closed socket.
+ raise EOFError
+ if r: break
+ if not self._keepGoing:
+ return
+ rec = Record()
+ rec.read(self._sock)
+
+ if rec.type == FCGI_GET_VALUES:
+ self._do_get_values(rec)
+ elif rec.type == FCGI_BEGIN_REQUEST:
+ self._do_begin_request(rec)
+ elif rec.type == FCGI_ABORT_REQUEST:
+ self._do_abort_request(rec)
+ elif rec.type == FCGI_PARAMS:
+ self._do_params(rec)
+ elif rec.type == FCGI_STDIN:
+ self._do_stdin(rec)
+ elif rec.type == FCGI_DATA:
+ self._do_data(rec)
+ elif rec.requestId == FCGI_NULL_REQUEST_ID:
+ self._do_unknown_type(rec)
+ else:
+ # Need to complain about this.
+ pass
+
+ def writeRecord(self, rec):
+ """
+ Write a Record to the socket.
+ """
+ rec.write(self._sock)
+
+ def end_request(self, req, appStatus=0L,
+ protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
+ """
+ End a Request.
+
+ Called by Request objects. An FCGI_END_REQUEST Record is
+ sent to the web server. If the web server no longer requires
+ the connection, the socket is closed, thereby ending this
+ Connection (run() returns).
+ """
+ rec = Record(FCGI_END_REQUEST, req.requestId)
+ rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus,
+ protocolStatus)
+ rec.contentLength = FCGI_EndRequestBody_LEN
+ self.writeRecord(rec)
+
+ if remove:
+ del self._requests[req.requestId]
+
+ if __debug__: _debug(2, 'end_request: flags = %d' % req.flags)
+
+ if not (req.flags & FCGI_KEEP_CONN) and not self._requests:
+ self._cleanupSocket()
+ self._keepGoing = False
+
+ def _do_get_values(self, inrec):
+ """Handle an FCGI_GET_VALUES request from the web server."""
+ outrec = Record(FCGI_GET_VALUES_RESULT)
+
+ pos = 0
+ while pos < inrec.contentLength:
+ pos, (name, value) = decode_pair(inrec.contentData, pos)
+ cap = self.server.capability.get(name)
+ if cap is not None:
+ outrec.contentData += encode_pair(name, str(cap))
+
+ outrec.contentLength = len(outrec.contentData)
+ self.writeRecord(outrec)
+
+ def _do_begin_request(self, inrec):
+ """Handle an FCGI_BEGIN_REQUEST from the web server."""
+ role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData)
+
+ req = self.server.request_class(self, self._inputStreamClass)
+ req.requestId, req.role, req.flags = inrec.requestId, role, flags
+ req.aborted = False
+
+ if not self._multiplexed and self._requests:
+ # Can't multiplex requests.
+ self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False)
+ else:
+ self._requests[inrec.requestId] = req
+
+ def _do_abort_request(self, inrec):
+ """
+ Handle an FCGI_ABORT_REQUEST from the web server.
+
+ We just mark a flag in the associated Request.
+ """
+ req = self._requests.get(inrec.requestId)
+ if req is not None:
+ req.aborted = True
+
+ def _start_request(self, req):
+ """Run the request."""
+ # Not multiplexed, so run it inline.
+ req.run()
+
+ def _do_params(self, inrec):
+ """
+ Handle an FCGI_PARAMS Record.
+
+ If the last FCGI_PARAMS Record is received, start the request.
+ """
+ req = self._requests.get(inrec.requestId)
+ if req is not None:
+ if inrec.contentLength:
+ pos = 0
+ while pos < inrec.contentLength:
+ pos, (name, value) = decode_pair(inrec.contentData, pos)
+ req.params[name] = value
+ else:
+ self._start_request(req)
+
+ def _do_stdin(self, inrec):
+ """Handle the FCGI_STDIN stream."""
+ req = self._requests.get(inrec.requestId)
+ if req is not None:
+ req.stdin.add_data(inrec.contentData)
+
+ def _do_data(self, inrec):
+ """Handle the FCGI_DATA stream."""
+ req = self._requests.get(inrec.requestId)
+ if req is not None:
+ req.data.add_data(inrec.contentData)
+
+ def _do_unknown_type(self, inrec):
+ """Handle an unknown request type. Respond accordingly."""
+ outrec = Record(FCGI_UNKNOWN_TYPE)
+ outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type)
+ outrec.contentLength = FCGI_UnknownTypeBody_LEN
+ self.writeRecord(rec)
+
+class MultiplexedConnection(Connection):
+ """
+ A version of Connection capable of handling multiple requests
+ simultaneously.
+ """
+ _multiplexed = True
+ _inputStreamClass = MultiplexedInputStream
+
+ def __init__(self, sock, addr, server):
+ super(MultiplexedConnection, self).__init__(sock, addr, server)
+
+ # Used to arbitrate access to self._requests.
+ lock = threading.RLock()
+
+ # Notification is posted everytime a request completes, allowing us
+ # to quit cleanly.
+ self._lock = threading.Condition(lock)
+
+ def _cleanupSocket(self):
+ # Wait for any outstanding requests before closing the socket.
+ self._lock.acquire()
+ while self._requests:
+ self._lock.wait()
+ self._lock.release()
+
+ super(MultiplexedConnection, self)._cleanupSocket()
+
+ def writeRecord(self, rec):
+ # Must use locking to prevent intermingling of Records from different
+ # threads.
+ self._lock.acquire()
+ try:
+ # Probably faster than calling super. ;)
+ rec.write(self._sock)
+ finally:
+ self._lock.release()
+
+ def end_request(self, req, appStatus=0L,
+ protocolStatus=FCGI_REQUEST_COMPLETE, remove=True):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self).end_request(req, appStatus,
+ protocolStatus,
+ remove)
+ self._lock.notify()
+ finally:
+ self._lock.release()
+
+ def _do_begin_request(self, inrec):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self)._do_begin_request(inrec)
+ finally:
+ self._lock.release()
+
+ def _do_abort_request(self, inrec):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self)._do_abort_request(inrec)
+ finally:
+ self._lock.release()
+
+ def _start_request(self, req):
+ thread.start_new_thread(req.run, ())
+
+ def _do_params(self, inrec):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self)._do_params(inrec)
+ finally:
+ self._lock.release()
+
+ def _do_stdin(self, inrec):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self)._do_stdin(inrec)
+ finally:
+ self._lock.release()
+
+ def _do_data(self, inrec):
+ self._lock.acquire()
+ try:
+ super(MultiplexedConnection, self)._do_data(inrec)
+ finally:
+ self._lock.release()
+
+class BaseFCGIServer(object):
+ request_class = Request
+ cgirequest_class = CGIRequest
+
+ # The maximum number of bytes (per Record) to write to the server.
+ # I've noticed mod_fastcgi has a relatively small receive buffer (8K or
+ # so).
+ maxwrite = 8192
+
+ # Limits the size of the InputStream's string buffer to this size + the
+ # server's maximum Record size. Since the InputStream is not seekable,
+ # we throw away already-read data once this certain amount has been read.
+ inputStreamShrinkThreshold = 102400 - 8192
+
+ def __init__(self, application, environ=None,
+ multithreaded=True, multiprocess=False,
+ bindAddress=None, umask=None, multiplexed=False,
+ debug=True, roles=(FCGI_RESPONDER,),
+ forceCGI=False):
+ """
+ bindAddress, if present, must either be a string or a 2-tuple. If
+ present, run() will open its own listening socket. You would use
+ this if you wanted to run your application as an 'external' FastCGI
+ app. (i.e. the webserver would no longer be responsible for starting
+ your app) If a string, it will be interpreted as a filename and a UNIX
+ socket will be opened. If a tuple, the first element, a string,
+ is the interface name/IP to bind to, and the second element (an int)
+ is the port number.
+
+ If binding to a UNIX socket, umask may be set to specify what
+ the umask is to be changed to before the socket is created in the
+ filesystem. After the socket is created, the previous umask is
+ restored.
+
+ Set multiplexed to True if you want to handle multiple requests
+ per connection. Some FastCGI backends (namely mod_fastcgi) don't
+ multiplex requests at all, so by default this is off (which saves
+ on thread creation/locking overhead). If threads aren't available,
+ this keyword is ignored; it's not possible to multiplex requests
+ at all.
+ """
+ if environ is None:
+ environ = {}
+
+ self.application = application
+ self.environ = environ
+ self.multithreaded = multithreaded
+ self.multiprocess = multiprocess
+ self.debug = debug
+ self.roles = roles
+ self.forceCGI = forceCGI
+
+ self._bindAddress = bindAddress
+ self._umask = umask
+
+ # Used to force single-threadedness
+ self._appLock = thread.allocate_lock()
+
+ if thread_available:
+ try:
+ import resource
+ # Attempt to glean the maximum number of connections
+ # from the OS.
+ maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+ except ImportError:
+ maxConns = 100 # Just some made up number.
+ maxReqs = maxConns
+ if multiplexed:
+ self._connectionClass = MultiplexedConnection
+ maxReqs *= 5 # Another made up number.
+ else:
+ self._connectionClass = Connection
+ self.capability = {
+ FCGI_MAX_CONNS: maxConns,
+ FCGI_MAX_REQS: maxReqs,
+ FCGI_MPXS_CONNS: multiplexed and 1 or 0
+ }
+ else:
+ self._connectionClass = Connection
+ self.capability = {
+ # If threads aren't available, these are pretty much correct.
+ FCGI_MAX_CONNS: 1,
+ FCGI_MAX_REQS: 1,
+ FCGI_MPXS_CONNS: 0
+ }
+
+ def _setupSocket(self):
+ if self._bindAddress is None: # Run as a normal FastCGI?
+ isFCGI = True
+
+ sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET,
+ socket.SOCK_STREAM)
+ try:
+ sock.getpeername()
+ except socket.error, e:
+ if e[0] == errno.ENOTSOCK:
+ # Not a socket, assume CGI context.
+ isFCGI = False
+ elif e[0] != errno.ENOTCONN:
+ raise
+
+ # FastCGI/CGI discrimination is broken on Mac OS X.
+ # Set the environment variable FCGI_FORCE_CGI to "Y" or "y"
+ # if you want to run your app as a simple CGI. (You can do
+ # this with Apache's mod_env [not loaded by default in OS X
+ # client, ha ha] and the SetEnv directive.)
+ if not isFCGI or self.forceCGI or \
+ os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'):
+ req = self.cgirequest_class(self)
+ req.run()
+ sys.exit(0)
+ else:
+ # Run as a server
+ oldUmask = None
+ if type(self._bindAddress) is str:
+ # Unix socket
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ os.unlink(self._bindAddress)
+ except OSError:
+ pass
+ if self._umask is not None:
+ oldUmask = os.umask(self._umask)
+ else:
+ # INET socket
+ assert type(self._bindAddress) is tuple
+ assert len(self._bindAddress) == 2
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ sock.bind(self._bindAddress)
+ sock.listen(socket.SOMAXCONN)
+
+ if oldUmask is not None:
+ os.umask(oldUmask)
+
+ return sock
+
+ def _cleanupSocket(self, sock):
+ """Closes the main socket."""
+ sock.close()
+
+ def handler(self, req):
+ """Special handler for WSGI."""
+ if req.role not in self.roles:
+ return FCGI_UNKNOWN_ROLE, 0
+
+ # Mostly taken from example CGI gateway.
+ environ = req.params
+ environ.update(self.environ)
+
+ environ['wsgi.version'] = (1,0)
+ environ['wsgi.input'] = req.stdin
+ if self._bindAddress is None:
+ stderr = req.stderr
+ else:
+ stderr = TeeOutputStream((sys.stderr, req.stderr))
+ environ['wsgi.errors'] = stderr
+ environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \
+ thread_available and self.multithreaded
+ environ['wsgi.multiprocess'] = isinstance(req, CGIRequest) or \
+ self.multiprocess
+ environ['wsgi.run_once'] = isinstance(req, CGIRequest)
+
+ if environ.get('HTTPS', 'off') in ('on', '1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ self._sanitizeEnv(environ)
+
+ headers_set = []
+ headers_sent = []
+ result = None
+
+ def write(data):
+ assert type(data) is str, 'write() argument must be string'
+ assert headers_set, 'write() before start_response()'
+
+ if not headers_sent:
+ status, responseHeaders = headers_sent[:] = headers_set
+ found = False
+ for header,value in responseHeaders:
+ if header.lower() == 'content-length':
+ found = True
+ break
+ if not found and result is not None:
+ try:
+ if len(result) == 1:
+ responseHeaders.append(('Content-Length',
+ str(len(data))))
+ except:
+ pass
+ s = 'Status: %s\r\n' % status
+ for header in responseHeaders:
+ s += '%s: %s\r\n' % header
+ s += '\r\n'
+ req.stdout.write(s)
+
+ req.stdout.write(data)
+ req.stdout.flush()
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise if too late
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ else:
+ assert not headers_set, 'Headers already set!'
+
+ assert type(status) is str, 'Status must be a string'
+ assert len(status) >= 4, 'Status must be at least 4 characters'
+ assert int(status[:3]), 'Status must begin with 3-digit code'
+ assert status[3] == ' ', 'Status must have a space after code'
+ assert type(response_headers) is list, 'Headers must be a list'
+ if __debug__:
+ for name,val in response_headers:
+ assert type(name) is str, 'Header name "%s" must be a string' % name
+ assert type(val) is str, 'Value of header "%s" must be a string' % name
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ if not self.multithreaded:
+ self._appLock.acquire()
+ try:
+ try:
+ result = self.application(environ, start_response)
+ try:
+ for data in result:
+ if data:
+ write(data)
+ if not headers_sent:
+ write('') # in case body was empty
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except socket.error, e:
+ if e[0] != errno.EPIPE:
+ raise # Don't let EPIPE propagate beyond server
+ finally:
+ if not self.multithreaded:
+ self._appLock.release()
+
+ return FCGI_REQUEST_COMPLETE, 0
+
+ def _sanitizeEnv(self, environ):
+ """Ensure certain values are present, if required by WSGI."""
+ if not environ.has_key('SCRIPT_NAME'):
+ environ['SCRIPT_NAME'] = ''
+
+ reqUri = None
+ if environ.has_key('REQUEST_URI'):
+ reqUri = environ['REQUEST_URI'].split('?', 1)
+
+ if not environ.has_key('PATH_INFO') or not environ['PATH_INFO']:
+ if reqUri is not None:
+ environ['PATH_INFO'] = reqUri[0]
+ else:
+ environ['PATH_INFO'] = ''
+ if not environ.has_key('QUERY_STRING') or not environ['QUERY_STRING']:
+ if reqUri is not None and len(reqUri) > 1:
+ environ['QUERY_STRING'] = reqUri[1]
+ else:
+ environ['QUERY_STRING'] = ''
+
+ # If any of these are missing, it probably signifies a broken
+ # server...
+ for name,default in [('REQUEST_METHOD', 'GET'),
+ ('SERVER_NAME', 'localhost'),
+ ('SERVER_PORT', '80'),
+ ('SERVER_PROTOCOL', 'HTTP/1.0')]:
+ if not environ.has_key(name):
+ environ['wsgi.errors'].write('%s: missing FastCGI param %s '
+ 'required by WSGI!\n' %
+ (self.__class__.__name__, name))
+ environ[name] = default
+
+    def error(self, req):
+        """
+        Called by Request if an exception occurs within the handler. May and
+        should be overridden.
+        """
+        if self.debug:
+            # Debug mode: render the full traceback as HTML via cgitb.
+            import cgitb
+            req.stdout.write('Content-Type: text/html\r\n\r\n' +
+                             cgitb.html(sys.exc_info()))
+        else:
+            # NOTE(review): this multi-line HTML literal was mangled in this
+            # copy (tags stripped); restored from the upstream flup source.
+            errorpage = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN">
+<html><head>
+<title>Unhandled Exception</title>
+</head><body>
+<h1>Unhandled Exception</h1>
+<p>An unhandled exception was thrown by the application.</p>
+</body></html>
+"""
+            req.stdout.write('Content-Type: text/html\r\n\r\n' +
+                             errorpage)
diff --git a/lib/nulib/python/nulib/ext/flup/server/fcgi_fork.py b/lib/nulib/python/nulib/ext/flup/server/fcgi_fork.py
new file mode 100644
index 0000000..d79b777
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/fcgi_fork.py
@@ -0,0 +1,168 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+fcgi - a FastCGI/WSGI gateway.
+
+For more information about FastCGI, see <http://www.fastcgi.com/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/peps/pep-0333.html>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ from myapplication import app # Assume app is your WSGI application object
+ from fcgi import WSGIServer
+ WSGIServer(app).run()
+
+See the documentation for WSGIServer for more information.
+
+On most platforms, fcgi will fallback to regular CGI behavior if run in a
+non-FastCGI context. If you want to force CGI behavior, set the environment
+variable FCGI_FORCE_CGI to "Y" or "y".
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import os
+
+from flup.server.fcgi_base import BaseFCGIServer, FCGI_RESPONDER, \
+ FCGI_MAX_CONNS, FCGI_MAX_REQS, FCGI_MPXS_CONNS
+from flup.server.preforkserver import PreforkServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseFCGIServer, PreforkServer):
+ """
+ FastCGI server that supports the Web Server Gateway Interface. See
+    <http://www.python.org/peps/pep-0333.html>.
+ """
+ def __init__(self, application, environ=None,
+ bindAddress=None, umask=None, multiplexed=False,
+ debug=True, roles=(FCGI_RESPONDER,), forceCGI=False, **kw):
+ """
+ environ, if present, must be a dictionary-like object. Its
+ contents will be copied into application's environ. Useful
+ for passing application-specific variables.
+
+ bindAddress, if present, must either be a string or a 2-tuple. If
+ present, run() will open its own listening socket. You would use
+ this if you wanted to run your application as an 'external' FastCGI
+ app. (i.e. the webserver would no longer be responsible for starting
+ your app) If a string, it will be interpreted as a filename and a UNIX
+ socket will be opened. If a tuple, the first element, a string,
+ is the interface name/IP to bind to, and the second element (an int)
+ is the port number.
+ """
+ BaseFCGIServer.__init__(self, application,
+ environ=environ,
+ multithreaded=False,
+ multiprocess=True,
+ bindAddress=bindAddress,
+ umask=umask,
+ multiplexed=multiplexed,
+ debug=debug,
+ roles=roles,
+ forceCGI=forceCGI)
+ for key in ('multithreaded', 'multiprocess', 'jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ PreforkServer.__init__(self, jobClass=self._connectionClass,
+ jobArgs=(self,), **kw)
+
+ try:
+ import resource
+ # Attempt to glean the maximum number of connections
+ # from the OS.
+ try:
+ maxProcs = resource.getrlimit(resource.RLIMIT_NPROC)[0]
+ maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+ maxConns = min(maxConns, maxProcs)
+ except AttributeError:
+ maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
+ except ImportError:
+ maxConns = 100 # Just some made up number.
+ maxReqs = maxConns
+ self.capability = {
+ FCGI_MAX_CONNS: maxConns,
+ FCGI_MAX_REQS: maxReqs,
+ FCGI_MPXS_CONNS: 0
+ }
+
+ def _isClientAllowed(self, addr):
+ return self._web_server_addrs is None or \
+ (len(addr) == 2 and addr[0] in self._web_server_addrs)
+
+ def run(self):
+ """
+ The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
+ SIGHUP was received, False otherwise.
+ """
+ self._web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
+ if self._web_server_addrs is not None:
+ self._web_server_addrs = map(lambda x: x.strip(),
+ self._web_server_addrs.split(','))
+
+ sock = self._setupSocket()
+
+ ret = PreforkServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+        yield '<html><head><title>Hello World!</title></head>\n' \
+              '<body>\n' \
+              '<h1>Hello World!</h1>\n' \
+              '<table border="1">'
+        names = environ.keys()
+        names.sort()
+        for name in names:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                name, cgi.escape(`environ[name]`))
+
+        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+                                keep_blank_values=1)
+        if form.list:
+            yield '<tr><th colspan="2">Form data</th></tr>'
+
+        for field in form.list:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                field.name, field.value)
+
+        yield '</table>\n' \
+              '</body></html>\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ WSGIServer(test_app).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/fcgi_single.py b/lib/nulib/python/nulib/ext/flup/server/fcgi_single.py
new file mode 100644
index 0000000..e62b08f
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/fcgi_single.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+fcgi - a FastCGI/WSGI gateway.
+
+For more information about FastCGI, see <http://www.fastcgi.com/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/peps/pep-0333.html>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ from myapplication import app # Assume app is your WSGI application object
+ from fcgi import WSGIServer
+ WSGIServer(app).run()
+
+See the documentation for WSGIServer for more information.
+
+On most platforms, fcgi will fallback to regular CGI behavior if run in a
+non-FastCGI context. If you want to force CGI behavior, set the environment
+variable FCGI_FORCE_CGI to "Y" or "y".
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import os
+
+from flup.server.fcgi_base import BaseFCGIServer, FCGI_RESPONDER, \
+ FCGI_MAX_CONNS, FCGI_MAX_REQS, FCGI_MPXS_CONNS
+from flup.server.singleserver import SingleServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseFCGIServer, SingleServer):
+ """
+ FastCGI server that supports the Web Server Gateway Interface. See
+    <http://www.python.org/peps/pep-0333.html>.
+ """
+ def __init__(self, application, environ=None,
+ bindAddress=None, umask=None, multiplexed=False,
+ debug=True, roles=(FCGI_RESPONDER,), forceCGI=False, **kw):
+ """
+ environ, if present, must be a dictionary-like object. Its
+ contents will be copied into application's environ. Useful
+ for passing application-specific variables.
+
+ bindAddress, if present, must either be a string or a 2-tuple. If
+ present, run() will open its own listening socket. You would use
+ this if you wanted to run your application as an 'external' FastCGI
+ app. (i.e. the webserver would no longer be responsible for starting
+ your app) If a string, it will be interpreted as a filename and a UNIX
+ socket will be opened. If a tuple, the first element, a string,
+ is the interface name/IP to bind to, and the second element (an int)
+ is the port number.
+ """
+ BaseFCGIServer.__init__(self, application,
+ environ=environ,
+ multithreaded=False,
+ multiprocess=False,
+ bindAddress=bindAddress,
+ umask=umask,
+ multiplexed=multiplexed,
+ debug=debug,
+ roles=roles,
+ forceCGI=forceCGI)
+ for key in ('jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ SingleServer.__init__(self, jobClass=self._connectionClass,
+ jobArgs=(self,), **kw)
+ self.capability = {
+ FCGI_MAX_CONNS: 1,
+ FCGI_MAX_REQS: 1,
+ FCGI_MPXS_CONNS: 0
+ }
+
+ def _isClientAllowed(self, addr):
+ return self._web_server_addrs is None or \
+ (len(addr) == 2 and addr[0] in self._web_server_addrs)
+
+ def run(self):
+ """
+ The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if
+ SIGHUP was received, False otherwise.
+ """
+ self._web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS')
+ if self._web_server_addrs is not None:
+ self._web_server_addrs = map(lambda x: x.strip(),
+ self._web_server_addrs.split(','))
+
+ sock = self._setupSocket()
+
+ ret = SingleServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+        yield '<html><head><title>Hello World!</title></head>\n' \
+              '<body>\n' \
+              '<h1>Hello World!</h1>\n' \
+              '<table border="1">'
+        names = environ.keys()
+        names.sort()
+        for name in names:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                name, cgi.escape(`environ[name]`))
+
+        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+                                keep_blank_values=1)
+        if form.list:
+            yield '<tr><th colspan="2">Form data</th></tr>'
+
+        for field in form.list:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                field.name, field.value)
+
+        yield '</table>\n' \
+              '</body></html>\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ WSGIServer(test_app).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/paste_factory.py b/lib/nulib/python/nulib/ext/flup/server/paste_factory.py
new file mode 100644
index 0000000..1bcc867
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/paste_factory.py
@@ -0,0 +1,121 @@
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+def asbool(obj):
+ if isinstance(obj, (str, unicode)):
+ obj = obj.strip().lower()
+ if obj in ['true', 'yes', 'on', 'y', 't', '1']:
+ return True
+ elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
+ return False
+ else:
+ raise ValueError(
+ "String is not true/false: %r" % obj)
+ return bool(obj)
+
+def aslist(obj, sep=None, strip=True):
+ if isinstance(obj, (str, unicode)):
+ lst = obj.split(sep)
+ if strip:
+ lst = [v.strip() for v in lst]
+ return lst
+ elif isinstance(obj, (list, tuple)):
+ return obj
+ elif obj is None:
+ return []
+ else:
+ return [obj]
+
+def run_ajp_thread(wsgi_app, global_conf,
+ scriptName='', host='localhost', port='8009',
+ allowedServers='127.0.0.1'):
+ import flup.server.ajp
+ addr = (host, int(port))
+ s = flup.server.ajp.WSGIServer(
+ wsgi_app,
+ scriptName=scriptName,
+ bindAddress=addr,
+ allowedServers=aslist(allowedServers),
+ )
+ s.run()
+
+def run_ajp_fork(wsgi_app, global_conf,
+ scriptName='', host='localhost', port='8009',
+ allowedServers='127.0.0.1'):
+ import flup.server.ajp_fork
+ addr = (host, int(port))
+ s = flup.server.ajp_fork.WSGIServer(
+ wsgi_app,
+ scriptName=scriptName,
+ bindAddress=addr,
+ allowedServers=aslist(allowedServers),
+ )
+ s.run()
+
+def run_fcgi_thread(wsgi_app, global_conf,
+ host=None, port=None,
+ socket=None, umask=None,
+ multiplexed=False):
+ import flup.server.fcgi
+ if socket:
+ assert host is None and port is None
+ sock = socket
+ elif host:
+ assert host is not None and port is not None
+ sock = (host, int(port))
+ else:
+ sock = None
+ if umask is not None:
+ umask = int(umask)
+ s = flup.server.fcgi.WSGIServer(
+ wsgi_app,
+ bindAddress=sock, umask=umask,
+ multiplexed=asbool(multiplexed))
+ s.run()
+
+def run_fcgi_fork(wsgi_app, global_conf,
+ host=None, port=None,
+ socket=None, umask=None,
+ multiplexed=False):
+ import flup.server.fcgi_fork
+ if socket:
+ assert host is None and port is None
+ sock = socket
+ elif host:
+ assert host is not None and port is not None
+ sock = (host, int(port))
+ else:
+ sock = None
+ if umask is not None:
+ umask = int(umask)
+ s = flup.server.fcgi_fork.WSGIServer(
+ wsgi_app,
+ bindAddress=sock, umask=umask,
+ multiplexed=asbool(multiplexed))
+ s.run()
+
+def run_scgi_thread(wsgi_app, global_conf,
+ scriptName='', host='localhost', port='4000',
+ allowedServers='127.0.0.1'):
+ import flup.server.scgi
+ addr = (host, int(port))
+ s = flup.server.scgi.WSGIServer(
+ wsgi_app,
+ scriptName=scriptName,
+ bindAddress=addr,
+ allowedServers=aslist(allowedServers),
+ )
+ s.run()
+
+def run_scgi_fork(wsgi_app, global_conf,
+ scriptName='', host='localhost', port='4000',
+ allowedServers='127.0.0.1'):
+ import flup.server.scgi_fork
+ addr = (host, int(port))
+ s = flup.server.scgi_fork.WSGIServer(
+ wsgi_app,
+ scriptName=scriptName,
+ bindAddress=addr,
+ allowedServers=aslist(allowedServers),
+ )
+ s.run()
+
diff --git a/lib/nulib/python/nulib/ext/flup/server/preforkserver.py b/lib/nulib/python/nulib/ext/flup/server/preforkserver.py
new file mode 100644
index 0000000..5eded2c
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/preforkserver.py
@@ -0,0 +1,433 @@
+# Copyright (c) 2005 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import sys
+import os
+import socket
+import select
+import errno
+import signal
+import random
+import time
+
+try:
+ import fcntl
+except ImportError:
+ def setCloseOnExec(sock):
+ pass
+else:
+ def setCloseOnExec(sock):
+ fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+
+# If running Python < 2.4, require eunuchs module for socket.socketpair().
+# See <http://www.inoi.fi/open/trac/eunuchs>.
+if not hasattr(socket, 'socketpair'):
+ try:
+ import eunuchs.socketpair
+ except ImportError:
+ # TODO: Other alternatives? Perhaps using os.pipe()?
+ raise ImportError, 'Requires eunuchs module for Python < 2.4'
+
+ def socketpair():
+ s1, s2 = eunuchs.socketpair.socketpair()
+ p, c = (socket.fromfd(s1, socket.AF_UNIX, socket.SOCK_STREAM),
+ socket.fromfd(s2, socket.AF_UNIX, socket.SOCK_STREAM))
+ os.close(s1)
+ os.close(s2)
+ return p, c
+
+ socket.socketpair = socketpair
+
+class PreforkServer(object):
+ """
+ A preforked server model conceptually similar to Apache httpd(2). At
+ any given time, ensures there are at least minSpare children ready to
+ process new requests (up to a maximum of maxChildren children total).
+ If the number of idle children is ever above maxSpare, the extra
+ children are killed.
+
+ If maxRequests is positive, each child will only handle that many
+ requests in its lifetime before exiting.
+
+ jobClass should be a class whose constructor takes at least two
+ arguments: the client socket and client address. jobArgs, which
+ must be a list or tuple, is any additional (static) arguments you
+ wish to pass to the constructor.
+
+ jobClass should have a run() method (taking no arguments) that does
+ the actual work. When run() returns, the request is considered
+ complete and the child process moves to idle state.
+ """
+ def __init__(self, minSpare=1, maxSpare=5, maxChildren=50,
+ maxRequests=0, jobClass=None, jobArgs=()):
+ self._minSpare = minSpare
+ self._maxSpare = maxSpare
+ self._maxChildren = max(maxSpare, maxChildren)
+ self._maxRequests = maxRequests
+ self._jobClass = jobClass
+ self._jobArgs = jobArgs
+
+ # Internal state of children. Maps pids to dictionaries with two
+ # members: 'file' and 'avail'. 'file' is the socket to that
+ # individidual child and 'avail' is whether or not the child is
+ # free to process requests.
+ self._children = {}
+
+ def run(self, sock):
+ """
+ The main loop. Pass a socket that is ready to accept() client
+ connections. Return value will be True or False indiciating whether
+ or not the loop was exited due to SIGHUP.
+ """
+ # Set up signal handlers.
+ self._keepGoing = True
+ self._hupReceived = False
+ self._installSignalHandlers()
+
+ # Don't want operations on main socket to block.
+ sock.setblocking(0)
+
+ # Set close-on-exec
+ setCloseOnExec(sock)
+
+ # Main loop.
+ while self._keepGoing:
+ # Maintain minimum number of children.
+ while len(self._children) < self._maxSpare:
+ if not self._spawnChild(sock): break
+
+ # Wait on any socket activity from live children.
+ r = [x['file'] for x in self._children.values()
+ if x['file'] is not None]
+
+ if len(r) == len(self._children):
+ timeout = None
+ else:
+ # There are dead children that need to be reaped, ensure
+ # that they are by timing out, if necessary.
+ timeout = 2
+
+ try:
+ r, w, e = select.select(r, [], [], timeout)
+ except select.error, e:
+ if e[0] != errno.EINTR:
+ raise
+
+ # Scan child sockets and tend to those that need attention.
+ for child in r:
+ # Receive status byte.
+ try:
+ state = child.recv(1)
+ except socket.error, e:
+ if e[0] in (errno.EAGAIN, errno.EINTR):
+ # Guess it really didn't need attention?
+ continue
+ raise
+ # Try to match it with a child. (Do we need a reverse map?)
+ for pid,d in self._children.items():
+ if child is d['file']:
+ if state:
+ # Set availability status accordingly.
+ self._children[pid]['avail'] = state != '\x00'
+ else:
+ # Didn't receive anything. Child is most likely
+ # dead.
+ d = self._children[pid]
+ d['file'].close()
+ d['file'] = None
+ d['avail'] = False
+
+ # Reap children.
+ self._reapChildren()
+
+ # See who and how many children are available.
+ availList = filter(lambda x: x[1]['avail'], self._children.items())
+ avail = len(availList)
+
+ if avail < self._minSpare:
+ # Need to spawn more children.
+ while avail < self._minSpare and \
+ len(self._children) < self._maxChildren:
+ if not self._spawnChild(sock): break
+ avail += 1
+ elif avail > self._maxSpare:
+ # Too many spares, kill off the extras.
+ pids = [x[0] for x in availList]
+ pids.sort()
+ pids = pids[self._maxSpare:]
+ for pid in pids:
+ d = self._children[pid]
+ d['file'].close()
+ d['file'] = None
+ d['avail'] = False
+
+ # Clean up all child processes.
+ self._cleanupChildren()
+
+ # Restore signal handlers.
+ self._restoreSignalHandlers()
+
+ # Return bool based on whether or not SIGHUP was received.
+ return self._hupReceived
+
+ def _cleanupChildren(self):
+ """
+ Closes all child sockets (letting those that are available know
+ that it's time to exit). Sends SIGINT to those that are currently
+ processing (and hopes that it finishses ASAP).
+
+ Any children remaining after 10 seconds is SIGKILLed.
+ """
+ # Let all children know it's time to go.
+ for pid,d in self._children.items():
+ if d['file'] is not None:
+ d['file'].close()
+ d['file'] = None
+ if not d['avail']:
+ # Child is unavailable. SIGINT it.
+ try:
+ os.kill(pid, signal.SIGINT)
+ except OSError, e:
+ if e[0] != errno.ESRCH:
+ raise
+
+ def alrmHandler(signum, frame):
+ pass
+
+ # Set up alarm to wake us up after 10 seconds.
+ oldSIGALRM = signal.getsignal(signal.SIGALRM)
+ signal.signal(signal.SIGALRM, alrmHandler)
+ signal.alarm(10)
+
+ # Wait for all children to die.
+ while len(self._children):
+ try:
+ pid, status = os.wait()
+ except OSError, e:
+ if e[0] in (errno.ECHILD, errno.EINTR):
+ break
+ if self._children.has_key(pid):
+ del self._children[pid]
+
+ signal.signal(signal.SIGALRM, oldSIGALRM)
+
+ # Forcefully kill any remaining children.
+ for pid in self._children.keys():
+ try:
+ os.kill(pid, signal.SIGKILL)
+ except OSError, e:
+ if e[0] != errno.ESRCH:
+ raise
+
+ def _reapChildren(self):
+ """Cleans up self._children whenever children die."""
+ while True:
+ try:
+ pid, status = os.waitpid(-1, os.WNOHANG)
+ except OSError, e:
+ if e[0] == errno.ECHILD:
+ break
+ raise
+ if pid <= 0:
+ break
+ if self._children.has_key(pid): # Sanity check.
+ if self._children[pid]['file'] is not None:
+ self._children[pid]['file'].close()
+ del self._children[pid]
+
+ def _spawnChild(self, sock):
+ """
+ Spawn a single child. Returns True if successful, False otherwise.
+ """
+ # This socket pair is used for very simple communication between
+ # the parent and its children.
+ parent, child = socket.socketpair()
+ parent.setblocking(0)
+ setCloseOnExec(parent)
+ child.setblocking(0)
+ setCloseOnExec(child)
+ try:
+ pid = os.fork()
+ except OSError, e:
+ if e[0] in (errno.EAGAIN, errno.ENOMEM):
+ return False # Can't fork anymore.
+ raise
+ if not pid:
+ # Child
+ child.close()
+ # Put child into its own process group.
+ pid = os.getpid()
+ os.setpgid(pid, pid)
+ # Restore signal handlers.
+ self._restoreSignalHandlers()
+ # Close copies of child sockets.
+ for f in [x['file'] for x in self._children.values()
+ if x['file'] is not None]:
+ f.close()
+ self._children = {}
+ try:
+ # Enter main loop.
+ self._child(sock, parent)
+ except KeyboardInterrupt:
+ pass
+ sys.exit(0)
+ else:
+ # Parent
+ parent.close()
+ d = self._children[pid] = {}
+ d['file'] = child
+ d['avail'] = True
+ return True
+
+ def _isClientAllowed(self, addr):
+ """Override to provide access control."""
+ return True
+
+ def _notifyParent(self, parent, msg):
+ """Send message to parent, ignoring EPIPE and retrying on EAGAIN"""
+ while True:
+ try:
+ parent.send(msg)
+ return True
+ except socket.error, e:
+ if e[0] == errno.EPIPE:
+ return False # Parent is gone
+ if e[0] == errno.EAGAIN:
+ # Wait for socket change before sending again
+ select.select([], [parent], [])
+ else:
+ raise
+
+ def _child(self, sock, parent):
+ """Main loop for children."""
+ requestCount = 0
+
+ # Re-seed random module
+ preseed = ''
+ # urandom only exists in Python >= 2.4
+ if hasattr(os, 'urandom'):
+ try:
+ preseed = os.urandom(16)
+ except NotImplementedError:
+ pass
+ # Have doubts about this. random.seed will just hash the string
+ random.seed('%s%s%s' % (preseed, os.getpid(), time.time()))
+ del preseed
+
+ while True:
+ # Wait for any activity on the main socket or parent socket.
+ r, w, e = select.select([sock, parent], [], [])
+
+ for f in r:
+ # If there's any activity on the parent socket, it
+ # means the parent wants us to die or has died itself.
+ # Either way, exit.
+ if f is parent:
+ return
+
+ # Otherwise, there's activity on the main socket...
+ try:
+ clientSock, addr = sock.accept()
+ except socket.error, e:
+ if e[0] == errno.EAGAIN:
+ # Or maybe not.
+ continue
+ raise
+
+ setCloseOnExec(clientSock)
+
+ # Check if this client is allowed.
+ if not self._isClientAllowed(addr):
+ clientSock.close()
+ continue
+
+ # Notify parent we're no longer available.
+ self._notifyParent(parent, '\x00')
+
+ # Do the job.
+ self._jobClass(clientSock, addr, *self._jobArgs).run()
+
+ # If we've serviced the maximum number of requests, exit.
+ if self._maxRequests > 0:
+ requestCount += 1
+ if requestCount >= self._maxRequests:
+ break
+
+ # Tell parent we're free again.
+ if not self._notifyParent(parent, '\xff'):
+ return # Parent is gone.
+
+ # Signal handlers
+
+ def _hupHandler(self, signum, frame):
+ self._keepGoing = False
+ self._hupReceived = True
+
+ def _intHandler(self, signum, frame):
+ self._keepGoing = False
+
+ def _chldHandler(self, signum, frame):
+ # Do nothing (breaks us out of select and allows us to reap children).
+ pass
+
+ def _installSignalHandlers(self):
+ supportedSignals = [signal.SIGINT, signal.SIGTERM]
+ if hasattr(signal, 'SIGHUP'):
+ supportedSignals.append(signal.SIGHUP)
+
+ self._oldSIGs = [(x,signal.getsignal(x)) for x in supportedSignals]
+
+ for sig in supportedSignals:
+ if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
+ signal.signal(sig, self._hupHandler)
+ else:
+ signal.signal(sig, self._intHandler)
+
+ def _restoreSignalHandlers(self):
+ """Restores previous signal handlers."""
+ for signum,handler in self._oldSIGs:
+ signal.signal(signum, handler)
+
+if __name__ == '__main__':
+ class TestJob(object):
+ def __init__(self, sock, addr):
+ self._sock = sock
+ self._addr = addr
+ def run(self):
+ print "Client connection opened from %s:%d" % self._addr
+ self._sock.send('Hello World!\n')
+ self._sock.setblocking(1)
+ self._sock.recv(1)
+ self._sock.close()
+ print "Client connection closed from %s:%d" % self._addr
+ sock = socket.socket()
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(('', 8080))
+ sock.listen(socket.SOMAXCONN)
+ PreforkServer(maxChildren=10, jobClass=TestJob).run(sock)
diff --git a/lib/nulib/python/nulib/ext/flup/server/scgi.py b/lib/nulib/python/nulib/ext/flup/server/scgi.py
new file mode 100644
index 0000000..aad3d7b
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/scgi.py
@@ -0,0 +1,190 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+scgi - an SCGI/WSGI gateway.
+
+For more information about SCGI and mod_scgi for Apache1/Apache2, see
+<http://www.mems-exchange.org/software/scgi/>.
+
+For more information about the Web Server Gateway Interface, see
+<http://www.python.org/peps/pep-0333.html>.
+
+Example usage:
+
+ #!/usr/bin/env python
+ import sys
+ from myapplication import app # Assume app is your WSGI application object
+ from scgi import WSGIServer
+ ret = WSGIServer(app).run()
+ sys.exit(ret and 42 or 0)
+
+See the documentation for WSGIServer for more information.
+
+About the bit of logic at the end:
+Upon receiving SIGHUP, the python script will exit with status code 42. This
+can be used by a wrapper script to determine if the python script should be
+re-run. When a SIGINT or SIGTERM is received, the script exits with status
+code 0, possibly indicating a normal exit.
+
+Example wrapper script:
+
+ #!/bin/sh
+ STATUS=42
+ while test $STATUS -eq 42; do
+ python "$@" that_script_above.py
+ STATUS=$?
+ done
+"""
+
+__author__ = 'Allan Saddi <allan@saddi.com>'
+__version__ = '$Revision$'
+
+import logging
+import socket
+
+from flup.server.scgi_base import BaseSCGIServer, Connection, NoDefault
+from flup.server.threadedserver import ThreadedServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseSCGIServer, ThreadedServer):
+ """
+ SCGI/WSGI server. For information about SCGI (Simple Common Gateway
+    Interface), see <http://www.mems-exchange.org/software/scgi/>.
+
+    This server is similar to SWAP <http://www.idyll.org/~t/www-tools/wsgi/>,
+ another SCGI/WSGI server.
+
+ It differs from SWAP in that it isn't based on scgi.scgi_server and
+ therefore, it allows me to implement concurrency using threads. (Also,
+ this server was written from scratch and really has no other depedencies.)
+ Which server to use really boils down to whether you want multithreading
+ or forking. (But as an aside, I've found scgi.scgi_server's implementation
+ of preforking to be quite superior. So if your application really doesn't
+ mind running in multiple processes, go use SWAP. ;)
+ """
+ def __init__(self, application, scriptName=NoDefault, environ=None,
+ multithreaded=True, multiprocess=False,
+ bindAddress=('localhost', 4000), umask=None,
+ allowedServers=None,
+ loggingLevel=logging.INFO, debug=True, **kw):
+ """
+ scriptName is the initial portion of the URL path that "belongs"
+ to your application. It is used to determine PATH_INFO (which doesn't
+ seem to be passed in). An empty scriptName means your application
+ is mounted at the root of your virtual host.
+
+ environ, which must be a dictionary, can contain any additional
+ environment variables you want to pass to your application.
+
+ bindAddress is the address to bind to, which must be a string or
+ a tuple of length 2. If a tuple, the first element must be a string,
+ which is the host name or IPv4 address of a local interface. The
+ 2nd element of the tuple is the port number. If a string, it will
+ be interpreted as a filename and a UNIX socket will be opened.
+
+ If binding to a UNIX socket, umask may be set to specify what
+ the umask is to be changed to before the socket is created in the
+ filesystem. After the socket is created, the previous umask is
+ restored.
+
+ allowedServers must be None or a list of strings representing the
+ IPv4 addresses of servers allowed to connect. None means accept
+ connections from anywhere.
+
+ loggingLevel sets the logging level of the module-level logger.
+ """
+ BaseSCGIServer.__init__(self, application,
+ scriptName=scriptName,
+ environ=environ,
+ multithreaded=multithreaded,
+ multiprocess=multiprocess,
+ bindAddress=bindAddress,
+ umask=umask,
+ allowedServers=allowedServers,
+ loggingLevel=loggingLevel,
+ debug=debug)
+ for key in ('jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ ThreadedServer.__init__(self, jobClass=Connection, jobArgs=(self,),
+ **kw)
+
+ def run(self):
+ """
+ Main loop. Call this after instantiating WSGIServer. SIGHUP, SIGINT,
+ SIGQUIT, SIGTERM cause it to cleanup and return. (If a SIGHUP
+ is caught, this method returns True. Returns False otherwise.)
+ """
+ self.logger.info('%s starting up', self.__class__.__name__)
+
+ try:
+ sock = self._setupSocket()
+ except socket.error, e:
+ self.logger.error('Failed to bind socket (%s), exiting', e[1])
+ return False
+
+ ret = ThreadedServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ self.logger.info('%s shutting down%s', self.__class__.__name__,
+ self._hupReceived and ' (reload requested)' or '')
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+        yield '<html><head><title>Hello World!</title></head>\n' \
+              '<body>\n' \
+              '<h1>Hello World!</h1>\n' \
+              '<table border="1">'
+        names = environ.keys()
+        names.sort()
+        for name in names:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                name, cgi.escape(`environ[name]`))
+
+        form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+                                keep_blank_values=1)
+        if form.list:
+            yield '<tr><th colspan="2">Form data</th></tr>'
+
+        for field in form.list:
+            yield '<tr><td>%s</td><td>%s</td></tr>\n' % (
+                field.name, field.value)
+
+        yield '</table>\n' \
+              '</body></html>\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ WSGIServer(test_app,
+ loggingLevel=logging.DEBUG).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/scgi_base.py b/lib/nulib/python/nulib/ext/flup/server/scgi_base.py
new file mode 100644
index 0000000..cfa6662
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/scgi_base.py
@@ -0,0 +1,544 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import sys
+import logging
+import socket
+import select
+import errno
+import cStringIO as StringIO
+import signal
+import datetime
+import os
+import warnings
+
+# Threads are required. If you want a non-threaded (forking) version, look at
+# SWAP .
+import thread
+import threading
+
+__all__ = ['BaseSCGIServer']
+
+class NoDefault(object):
+ pass
+
+# The main classes use this name for logging.
+LoggerName = 'scgi-wsgi'
+
+# Set up module-level logger.
+console = logging.StreamHandler()
+console.setLevel(logging.DEBUG)
+console.setFormatter(logging.Formatter('%(asctime)s : %(message)s',
+ '%Y-%m-%d %H:%M:%S'))
+logging.getLogger(LoggerName).addHandler(console)
+del console
+
+class ProtocolError(Exception):
+ """
+ Exception raised when the server does something unexpected or
+ sends garbled data. Usually leads to a Connection closing.
+ """
+ pass
+
+def recvall(sock, length):
+ """
+ Attempts to receive length bytes from a socket, blocking if necessary.
+ (Socket may be blocking or non-blocking.)
+ """
+ dataList = []
+ recvLen = 0
+ while length:
+ try:
+ data = sock.recv(length)
+ except socket.error, e:
+ if e[0] == errno.EAGAIN:
+ select.select([sock], [], [])
+ continue
+ else:
+ raise
+ if not data: # EOF
+ break
+ dataList.append(data)
+ dataLen = len(data)
+ recvLen += dataLen
+ length -= dataLen
+ return ''.join(dataList), recvLen
+
+def readNetstring(sock):
+ """
+ Attempt to read a netstring from a socket.
+ """
+ # First attempt to read the length.
+ size = ''
+ while True:
+ try:
+ c = sock.recv(1)
+ except socket.error, e:
+ if e[0] == errno.EAGAIN:
+ select.select([sock], [], [])
+ continue
+ else:
+ raise
+ if c == ':':
+ break
+ if not c:
+ raise EOFError
+ size += c
+
+ # Try to decode the length.
+ try:
+ size = int(size)
+ if size < 0:
+ raise ValueError
+ except ValueError:
+ raise ProtocolError, 'invalid netstring length'
+
+ # Now read the string.
+ s, length = recvall(sock, size)
+
+ if length < size:
+ raise EOFError
+
+ # Lastly, the trailer.
+ trailer, length = recvall(sock, 1)
+
+ if length < 1:
+ raise EOFError
+
+ if trailer != ',':
+ raise ProtocolError, 'invalid netstring trailer'
+
+ return s
+
+class StdoutWrapper(object):
+ """
+ Wrapper for sys.stdout so we know if data has actually been written.
+ """
+ def __init__(self, stdout):
+ self._file = stdout
+ self.dataWritten = False
+
+ def write(self, data):
+ if data:
+ self.dataWritten = True
+ self._file.write(data)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+class Request(object):
+ """
+ Encapsulates data related to a single request.
+
+ Public attributes:
+ environ - Environment variables from web server.
+ stdin - File-like object representing the request body.
+ stdout - File-like object for writing the response.
+ """
+ def __init__(self, conn, environ, input, output):
+ self._conn = conn
+ self.environ = environ
+ self.stdin = input
+ self.stdout = StdoutWrapper(output)
+
+ self.logger = logging.getLogger(LoggerName)
+
+ def run(self):
+ self.logger.info('%s %s%s',
+ self.environ['REQUEST_METHOD'],
+ self.environ.get('SCRIPT_NAME', ''),
+ self.environ.get('PATH_INFO', ''))
+
+ start = datetime.datetime.now()
+
+ try:
+ self._conn.server.handler(self)
+ except:
+ self.logger.exception('Exception caught from handler')
+ if not self.stdout.dataWritten:
+ self._conn.server.error(self)
+
+ end = datetime.datetime.now()
+
+ handlerTime = end - start
+ self.logger.debug('%s %s%s done (%.3f secs)',
+ self.environ['REQUEST_METHOD'],
+ self.environ.get('SCRIPT_NAME', ''),
+ self.environ.get('PATH_INFO', ''),
+ handlerTime.seconds +
+ handlerTime.microseconds / 1000000.0)
+
+class Connection(object):
+ """
+ Represents a single client (web server) connection. A single request
+ is handled, after which the socket is closed.
+ """
+ def __init__(self, sock, addr, server):
+ self._sock = sock
+ self._addr = addr
+ self.server = server
+
+ self.logger = logging.getLogger(LoggerName)
+
+ def run(self):
+ if len(self._addr) == 2:
+ self.logger.debug('Connection starting up (%s:%d)',
+ self._addr[0], self._addr[1])
+
+ try:
+ self.processInput()
+ except (EOFError, KeyboardInterrupt):
+ pass
+ except ProtocolError, e:
+ self.logger.error("Protocol error '%s'", str(e))
+ except:
+ self.logger.exception('Exception caught in Connection')
+
+ if len(self._addr) == 2:
+ self.logger.debug('Connection shutting down (%s:%d)',
+ self._addr[0], self._addr[1])
+
+ # All done!
+ self._sock.close()
+
+ def processInput(self):
+ # Read headers
+ headers = readNetstring(self._sock)
+ headers = headers.split('\x00')[:-1]
+ if len(headers) % 2 != 0:
+ raise ProtocolError, 'invalid headers'
+ environ = {}
+ for i in range(len(headers) / 2):
+ environ[headers[2*i]] = headers[2*i+1]
+
+ clen = environ.get('CONTENT_LENGTH')
+ if clen is None:
+ raise ProtocolError, 'missing CONTENT_LENGTH'
+ try:
+ clen = int(clen)
+ if clen < 0:
+ raise ValueError
+ except ValueError:
+ raise ProtocolError, 'invalid CONTENT_LENGTH'
+
+ self._sock.setblocking(1)
+ if clen:
+ input = self._sock.makefile('r')
+ else:
+ # Empty input.
+ input = StringIO.StringIO()
+
+ # stdout
+ output = self._sock.makefile('w')
+
+ # Allocate Request
+ req = Request(self, environ, input, output)
+
+ # Run it.
+ req.run()
+
+ output.close()
+ input.close()
+
+class BaseSCGIServer(object):
+ # What Request class to use.
+ requestClass = Request
+
+ def __init__(self, application, scriptName=NoDefault, environ=None,
+ multithreaded=True, multiprocess=False,
+ bindAddress=('localhost', 4000), umask=None,
+ allowedServers=NoDefault,
+ loggingLevel=logging.INFO, debug=True):
+ """
+ scriptName is the initial portion of the URL path that "belongs"
+ to your application. It is used to determine PATH_INFO (which doesn't
+ seem to be passed in). An empty scriptName means your application
+ is mounted at the root of your virtual host.
+
+ environ, which must be a dictionary, can contain any additional
+ environment variables you want to pass to your application.
+
+ Set multithreaded to False if your application is not thread-safe.
+
+ Set multiprocess to True to explicitly set wsgi.multiprocess to
+ True. (Only makes sense with threaded servers.)
+
+ bindAddress is the address to bind to, which must be a string or
+ a tuple of length 2. If a tuple, the first element must be a string,
+ which is the host name or IPv4 address of a local interface. The
+ 2nd element of the tuple is the port number. If a string, it will
+ be interpreted as a filename and a UNIX socket will be opened.
+
+ If binding to a UNIX socket, umask may be set to specify what
+ the umask is to be changed to before the socket is created in the
+ filesystem. After the socket is created, the previous umask is
+ restored.
+
+ allowedServers must be None or a list of strings representing the
+ IPv4 addresses of servers allowed to connect. None means accept
+ connections from anywhere. By default, it is a list containing
+ the single item '127.0.0.1'.
+
+ loggingLevel sets the logging level of the module-level logger.
+ """
+ if environ is None:
+ environ = {}
+
+ self.application = application
+ self.scriptName = scriptName
+ self.environ = environ
+ self.multithreaded = multithreaded
+ self.multiprocess = multiprocess
+ self.debug = debug
+ self._bindAddress = bindAddress
+ self._umask = umask
+ if allowedServers is NoDefault:
+ allowedServers = ['127.0.0.1']
+ self._allowedServers = allowedServers
+
+ # Used to force single-threadedness.
+ self._appLock = thread.allocate_lock()
+
+ self.logger = logging.getLogger(LoggerName)
+ self.logger.setLevel(loggingLevel)
+
+ def _setupSocket(self):
+ """Creates and binds the socket for communication with the server."""
+ oldUmask = None
+ if type(self._bindAddress) is str:
+ # Unix socket
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ os.unlink(self._bindAddress)
+ except OSError:
+ pass
+ if self._umask is not None:
+ oldUmask = os.umask(self._umask)
+ else:
+ # INET socket
+ assert type(self._bindAddress) is tuple
+ assert len(self._bindAddress) == 2
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+
+ sock.bind(self._bindAddress)
+ sock.listen(socket.SOMAXCONN)
+
+ if oldUmask is not None:
+ os.umask(oldUmask)
+
+ return sock
+
+ def _cleanupSocket(self, sock):
+ """Closes the main socket."""
+ sock.close()
+
+ def _isClientAllowed(self, addr):
+ ret = self._allowedServers is None or \
+ len(addr) != 2 or \
+ (len(addr) == 2 and addr[0] in self._allowedServers)
+ if not ret:
+ self.logger.warning('Server connection from %s disallowed',
+ addr[0])
+ return ret
+
+ def handler(self, request):
+ """
+ WSGI handler. Sets up WSGI environment, calls the application,
+ and sends the application's response.
+ """
+ environ = request.environ
+ environ.update(self.environ)
+
+ environ['wsgi.version'] = (1,0)
+ environ['wsgi.input'] = request.stdin
+ environ['wsgi.errors'] = sys.stderr
+ environ['wsgi.multithread'] = self.multithreaded
+ environ['wsgi.multiprocess'] = self.multiprocess
+ environ['wsgi.run_once'] = False
+
+ if environ.get('HTTPS', 'off') in ('on', '1'):
+ environ['wsgi.url_scheme'] = 'https'
+ else:
+ environ['wsgi.url_scheme'] = 'http'
+
+ self._sanitizeEnv(environ)
+
+ headers_set = []
+ headers_sent = []
+ result = None
+
+ def write(data):
+ assert type(data) is str, 'write() argument must be string'
+ assert headers_set, 'write() before start_response()'
+
+ if not headers_sent:
+ status, responseHeaders = headers_sent[:] = headers_set
+ found = False
+ for header,value in responseHeaders:
+ if header.lower() == 'content-length':
+ found = True
+ break
+ if not found and result is not None:
+ try:
+ if len(result) == 1:
+ responseHeaders.append(('Content-Length',
+ str(len(data))))
+ except:
+ pass
+ s = 'Status: %s\r\n' % status
+ for header in responseHeaders:
+ s += '%s: %s\r\n' % header
+ s += '\r\n'
+ request.stdout.write(s)
+
+ request.stdout.write(data)
+ request.stdout.flush()
+
+ def start_response(status, response_headers, exc_info=None):
+ if exc_info:
+ try:
+ if headers_sent:
+ # Re-raise if too late
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None # avoid dangling circular ref
+ else:
+ assert not headers_set, 'Headers already set!'
+
+ assert type(status) is str, 'Status must be a string'
+ assert len(status) >= 4, 'Status must be at least 4 characters'
+ assert int(status[:3]), 'Status must begin with 3-digit code'
+ assert status[3] == ' ', 'Status must have a space after code'
+ assert type(response_headers) is list, 'Headers must be a list'
+ if __debug__:
+ for name,val in response_headers:
+ assert type(name) is str, 'Header name "%s" must be a string' % name
+ assert type(val) is str, 'Value of header "%s" must be a string' % name
+
+ headers_set[:] = [status, response_headers]
+ return write
+
+ if not self.multithreaded:
+ self._appLock.acquire()
+ try:
+ try:
+ result = self.application(environ, start_response)
+ try:
+ for data in result:
+ if data:
+ write(data)
+ if not headers_sent:
+ write('') # in case body was empty
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except socket.error, e:
+ if e[0] != errno.EPIPE:
+ raise # Don't let EPIPE propagate beyond server
+ finally:
+ if not self.multithreaded:
+ self._appLock.release()
+
+ def _sanitizeEnv(self, environ):
+ """Fill-in/deduce missing values in environ."""
+ reqUri = None
+ if environ.has_key('REQUEST_URI'):
+ reqUri = environ['REQUEST_URI'].split('?', 1)
+
+ # Ensure QUERY_STRING exists
+ if not environ.has_key('QUERY_STRING') or not environ['QUERY_STRING']:
+ if reqUri is not None and len(reqUri) > 1:
+ environ['QUERY_STRING'] = reqUri[1]
+ else:
+ environ['QUERY_STRING'] = ''
+
+ # Check WSGI_SCRIPT_NAME
+ scriptName = environ.get('WSGI_SCRIPT_NAME')
+ if scriptName is None:
+ scriptName = self.scriptName
+ else:
+ warnings.warn('WSGI_SCRIPT_NAME environment variable for scgi '
+ 'servers is deprecated',
+ DeprecationWarning)
+ if scriptName.lower() == 'none':
+ scriptName = None
+
+ if scriptName is None:
+ # Do nothing (most likely coming from cgi2scgi)
+ return
+
+ if scriptName is NoDefault:
+ # Pull SCRIPT_NAME/PATH_INFO from environment, with empty defaults
+ if not environ.has_key('SCRIPT_NAME'):
+ environ['SCRIPT_INFO'] = ''
+ if not environ.has_key('PATH_INFO') or not environ['PATH_INFO']:
+ if reqUri is not None:
+ environ['PATH_INFO'] = reqUri[0]
+ else:
+ environ['PATH_INFO'] = ''
+ else:
+ # Configured scriptName
+ warnings.warn('Configured SCRIPT_NAME is deprecated\n'
+ 'Do not use WSGI_SCRIPT_NAME or the scriptName\n'
+ 'keyword parameter -- they will be going away',
+ DeprecationWarning)
+
+ value = environ['SCRIPT_NAME']
+ value += environ.get('PATH_INFO', '')
+ if not value.startswith(scriptName):
+ self.logger.warning('scriptName does not match request URI')
+
+ environ['PATH_INFO'] = value[len(scriptName):]
+ environ['SCRIPT_NAME'] = scriptName
+
+ def error(self, request):
+ """
+ Override to provide custom error handling. Ideally, however,
+ all errors should be caught at the application level.
+ """
+ if self.debug:
+ import cgitb
+ request.stdout.write('Content-Type: text/html\r\n\r\n' +
+ cgitb.html(sys.exc_info()))
+ else:
+ errorpage = """
+
+Unhandled Exception
+
+
Unhandled Exception
+
An unhandled exception was thrown by the application.
+
+"""
+ request.stdout.write('Content-Type: text/html\r\n\r\n' +
+ errorpage)
diff --git a/lib/nulib/python/nulib/ext/flup/server/scgi_fork.py b/lib/nulib/python/nulib/ext/flup/server/scgi_fork.py
new file mode 100644
index 0000000..1c7506f
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/scgi_fork.py
@@ -0,0 +1,188 @@
+# Copyright (c) 2005, 2006 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+"""
+scgi - an SCGI/WSGI gateway.
+
+For more information about SCGI and mod_scgi for Apache1/Apache2, see
+.
+
+For more information about the Web Server Gateway Interface, see
+.
+
+Example usage:
+
+ #!/usr/bin/env python
+ import sys
+ from myapplication import app # Assume app is your WSGI application object
+ from scgi import WSGIServer
+ ret = WSGIServer(app).run()
+ sys.exit(ret and 42 or 0)
+
+See the documentation for WSGIServer for more information.
+
+About the bit of logic at the end:
+Upon receiving SIGHUP, the python script will exit with status code 42. This
+can be used by a wrapper script to determine if the python script should be
+re-run. When a SIGINT or SIGTERM is received, the script exits with status
+code 0, possibly indicating a normal exit.
+
+Example wrapper script:
+
+ #!/bin/sh
+ STATUS=42
+ while test $STATUS -eq 42; do
+ python "$@" that_script_above.py
+ STATUS=$?
+ done
+"""
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import logging
+import socket
+
+from flup.server.scgi_base import BaseSCGIServer, Connection, NoDefault
+from flup.server.preforkserver import PreforkServer
+
+__all__ = ['WSGIServer']
+
+class WSGIServer(BaseSCGIServer, PreforkServer):
+ """
+ SCGI/WSGI server. For information about SCGI (Simple Common Gateway
+ Interface), see .
+
+ This server is similar to SWAP ,
+ another SCGI/WSGI server.
+
+ It differs from SWAP in that it isn't based on scgi.scgi_server and
+ therefore, it allows me to implement concurrency using threads. (Also,
+ this server was written from scratch and really has no other depedencies.)
+ Which server to use really boils down to whether you want multithreading
+ or forking. (But as an aside, I've found scgi.scgi_server's implementation
+ of preforking to be quite superior. So if your application really doesn't
+ mind running in multiple processes, go use SWAP. ;)
+ """
+ def __init__(self, application, scriptName=NoDefault, environ=None,
+ bindAddress=('localhost', 4000), umask=None,
+ allowedServers=None,
+ loggingLevel=logging.INFO, debug=True, **kw):
+ """
+ scriptName is the initial portion of the URL path that "belongs"
+ to your application. It is used to determine PATH_INFO (which doesn't
+ seem to be passed in). An empty scriptName means your application
+ is mounted at the root of your virtual host.
+
+ environ, which must be a dictionary, can contain any additional
+ environment variables you want to pass to your application.
+
+ bindAddress is the address to bind to, which must be a string or
+ a tuple of length 2. If a tuple, the first element must be a string,
+ which is the host name or IPv4 address of a local interface. The
+ 2nd element of the tuple is the port number. If a string, it will
+ be interpreted as a filename and a UNIX socket will be opened.
+
+ If binding to a UNIX socket, umask may be set to specify what
+ the umask is to be changed to before the socket is created in the
+ filesystem. After the socket is created, the previous umask is
+ restored.
+
+ allowedServers must be None or a list of strings representing the
+ IPv4 addresses of servers allowed to connect. None means accept
+ connections from anywhere.
+
+ loggingLevel sets the logging level of the module-level logger.
+ """
+ BaseSCGIServer.__init__(self, application,
+ scriptName=scriptName,
+ environ=environ,
+ multithreaded=False,
+ multiprocess=True,
+ bindAddress=bindAddress,
+ umask=umask,
+ allowedServers=allowedServers,
+ loggingLevel=loggingLevel,
+ debug=debug)
+ for key in ('multithreaded', 'multiprocess', 'jobClass', 'jobArgs'):
+ if kw.has_key(key):
+ del kw[key]
+ PreforkServer.__init__(self, jobClass=Connection, jobArgs=(self,), **kw)
+
+ def run(self):
+ """
+ Main loop. Call this after instantiating WSGIServer. SIGHUP, SIGINT,
+ SIGQUIT, SIGTERM cause it to cleanup and return. (If a SIGHUP
+ is caught, this method returns True. Returns False otherwise.)
+ """
+ self.logger.info('%s starting up', self.__class__.__name__)
+
+ try:
+ sock = self._setupSocket()
+ except socket.error, e:
+ self.logger.error('Failed to bind socket (%s), exiting', e[1])
+ return False
+
+ ret = PreforkServer.run(self, sock)
+
+ self._cleanupSocket(sock)
+
+ self.logger.info('%s shutting down%s', self.__class__.__name__,
+ self._hupReceived and ' (reload requested)' or '')
+
+ return ret
+
+if __name__ == '__main__':
+ def test_app(environ, start_response):
+ """Probably not the most efficient example."""
+ import cgi
+ start_response('200 OK', [('Content-Type', 'text/html')])
+ yield 'Hello World!\n' \
+ '\n' \
+ '
Hello World!
\n' \
+ '
'
+ names = environ.keys()
+ names.sort()
+ for name in names:
+ yield '
%s
%s
\n' % (
+ name, cgi.escape(`environ[name]`))
+
+ form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ,
+ keep_blank_values=1)
+ if form.list:
+ yield '
Form data
'
+
+ for field in form.list:
+ yield '
%s
%s
\n' % (
+ field.name, field.value)
+
+ yield '
\n' \
+ '\n'
+
+ from wsgiref import validate
+ test_app = validate.validator(test_app)
+ WSGIServer(test_app,
+ loggingLevel=logging.DEBUG).run()
diff --git a/lib/nulib/python/nulib/ext/flup/server/singleserver.py b/lib/nulib/python/nulib/ext/flup/server/singleserver.py
new file mode 100644
index 0000000..59fa6ea
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/singleserver.py
@@ -0,0 +1,166 @@
+# Copyright (c) 2005 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import sys
+import socket
+import select
+import signal
+import errno
+
+try:
+ import fcntl
+except ImportError:
+ def setCloseOnExec(sock):
+ pass
+else:
+ def setCloseOnExec(sock):
+ fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+
+__all__ = ['SingleServer']
+
+class SingleServer(object):
+ def __init__(self, jobClass=None, jobArgs=(), **kw):
+ self._jobClass = jobClass
+ self._jobArgs = jobArgs
+
+ def run(self, sock, timeout=1.0):
+ """
+ The main loop. Pass a socket that is ready to accept() client
+ connections. Return value will be True or False indiciating whether
+ or not the loop was exited due to SIGHUP.
+ """
+ # Set up signal handlers.
+ self._keepGoing = True
+ self._hupReceived = False
+
+ # Might need to revisit this?
+ if not sys.platform.startswith('win'):
+ self._installSignalHandlers()
+
+ # Set close-on-exec
+ setCloseOnExec(sock)
+
+ # Main loop.
+ while self._keepGoing:
+ try:
+ r, w, e = select.select([sock], [], [], timeout)
+ except select.error, e:
+ if e[0] == errno.EINTR:
+ continue
+ raise
+
+ if r:
+ try:
+ clientSock, addr = sock.accept()
+ except socket.error, e:
+ if e[0] in (errno.EINTR, errno.EAGAIN):
+ continue
+ raise
+
+ setCloseOnExec(clientSock)
+
+ if not self._isClientAllowed(addr):
+ clientSock.close()
+ continue
+
+ # Hand off to Connection.
+ conn = self._jobClass(clientSock, addr, *self._jobArgs)
+ conn.run()
+
+ self._mainloopPeriodic()
+
+ # Restore signal handlers.
+ self._restoreSignalHandlers()
+
+ # Return bool based on whether or not SIGHUP was received.
+ return self._hupReceived
+
+ def _mainloopPeriodic(self):
+ """
+ Called with just about each iteration of the main loop. Meant to
+ be overridden.
+ """
+ pass
+
+ def _exit(self, reload=False):
+ """
+ Protected convenience method for subclasses to force an exit. Not
+ really thread-safe, which is why it isn't public.
+ """
+ if self._keepGoing:
+ self._keepGoing = False
+ self._hupReceived = reload
+
+ def _isClientAllowed(self, addr):
+ """Override to provide access control."""
+ return True
+
+ # Signal handlers
+
+ def _hupHandler(self, signum, frame):
+ self._hupReceived = True
+ self._keepGoing = False
+
+ def _intHandler(self, signum, frame):
+ self._keepGoing = False
+
+ def _installSignalHandlers(self):
+ supportedSignals = [signal.SIGINT, signal.SIGTERM]
+ if hasattr(signal, 'SIGHUP'):
+ supportedSignals.append(signal.SIGHUP)
+
+ self._oldSIGs = [(x,signal.getsignal(x)) for x in supportedSignals]
+
+ for sig in supportedSignals:
+ if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
+ signal.signal(sig, self._hupHandler)
+ else:
+ signal.signal(sig, self._intHandler)
+
+ def _restoreSignalHandlers(self):
+ for signum,handler in self._oldSIGs:
+ signal.signal(signum, handler)
+
+if __name__ == '__main__':
+ class TestJob(object):
+ def __init__(self, sock, addr):
+ self._sock = sock
+ self._addr = addr
+ def run(self):
+ print "Client connection opened from %s:%d" % self._addr
+ self._sock.send('Hello World!\n')
+ self._sock.setblocking(1)
+ self._sock.recv(1)
+ self._sock.close()
+ print "Client connection closed from %s:%d" % self._addr
+ sock = socket.socket()
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(('', 8080))
+ sock.listen(socket.SOMAXCONN)
+ SingleServer(jobClass=TestJob).run(sock)
diff --git a/lib/nulib/python/nulib/ext/flup/server/threadedserver.py b/lib/nulib/python/nulib/ext/flup/server/threadedserver.py
new file mode 100644
index 0000000..c232347
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/threadedserver.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2005 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import sys
+import socket
+import select
+import signal
+import errno
+
+try:
+ import fcntl
+except ImportError:
+ def setCloseOnExec(sock):
+ pass
+else:
+ def setCloseOnExec(sock):
+ fcntl.fcntl(sock.fileno(), fcntl.F_SETFD, fcntl.FD_CLOEXEC)
+
+from flup.server.threadpool import ThreadPool
+
+__all__ = ['ThreadedServer']
+
+class ThreadedServer(object):
+ def __init__(self, jobClass=None, jobArgs=(), **kw):
+ self._jobClass = jobClass
+ self._jobArgs = jobArgs
+
+ self._threadPool = ThreadPool(**kw)
+
+ def run(self, sock, timeout=1.0):
+ """
+ The main loop. Pass a socket that is ready to accept() client
+ connections. Return value will be True or False indiciating whether
+ or not the loop was exited due to SIGHUP.
+ """
+ # Set up signal handlers.
+ self._keepGoing = True
+ self._hupReceived = False
+
+ # Might need to revisit this?
+ if not sys.platform.startswith('win'):
+ self._installSignalHandlers()
+
+ # Set close-on-exec
+ setCloseOnExec(sock)
+
+ # Main loop.
+ while self._keepGoing:
+ try:
+ r, w, e = select.select([sock], [], [], timeout)
+ except select.error, e:
+ if e[0] == errno.EINTR:
+ continue
+ raise
+
+ if r:
+ try:
+ clientSock, addr = sock.accept()
+ except socket.error, e:
+ if e[0] in (errno.EINTR, errno.EAGAIN):
+ continue
+ raise
+
+ setCloseOnExec(clientSock)
+
+ if not self._isClientAllowed(addr):
+ clientSock.close()
+ continue
+
+ # Hand off to Connection.
+ conn = self._jobClass(clientSock, addr, *self._jobArgs)
+ if not self._threadPool.addJob(conn, allowQueuing=False):
+ # No thread left, immediately close the socket to hopefully
+ # indicate to the web server that we're at our limit...
+ # and to prevent having too many opened (and useless)
+ # files.
+ clientSock.close()
+
+ self._mainloopPeriodic()
+
+ # Restore signal handlers.
+ self._restoreSignalHandlers()
+
+ # Return bool based on whether or not SIGHUP was received.
+ return self._hupReceived
+
+ def _mainloopPeriodic(self):
+ """
+ Called with just about each iteration of the main loop. Meant to
+ be overridden.
+ """
+ pass
+
+ def _exit(self, reload=False):
+ """
+ Protected convenience method for subclasses to force an exit. Not
+ really thread-safe, which is why it isn't public.
+ """
+ if self._keepGoing:
+ self._keepGoing = False
+ self._hupReceived = reload
+
+ def _isClientAllowed(self, addr):
+ """Override to provide access control."""
+ return True
+
+ # Signal handlers
+
+ def _hupHandler(self, signum, frame):
+ self._hupReceived = True
+ self._keepGoing = False
+
+ def _intHandler(self, signum, frame):
+ self._keepGoing = False
+
+ def _installSignalHandlers(self):
+ supportedSignals = [signal.SIGINT, signal.SIGTERM]
+ if hasattr(signal, 'SIGHUP'):
+ supportedSignals.append(signal.SIGHUP)
+
+ self._oldSIGs = [(x,signal.getsignal(x)) for x in supportedSignals]
+
+ for sig in supportedSignals:
+ if hasattr(signal, 'SIGHUP') and sig == signal.SIGHUP:
+ signal.signal(sig, self._hupHandler)
+ else:
+ signal.signal(sig, self._intHandler)
+
+ def _restoreSignalHandlers(self):
+ for signum,handler in self._oldSIGs:
+ signal.signal(signum, handler)
+
+if __name__ == '__main__':
+ class TestJob(object):
+ def __init__(self, sock, addr):
+ self._sock = sock
+ self._addr = addr
+ def run(self):
+ print "Client connection opened from %s:%d" % self._addr
+ self._sock.send('Hello World!\n')
+ self._sock.setblocking(1)
+ self._sock.recv(1)
+ self._sock.close()
+ print "Client connection closed from %s:%d" % self._addr
+ sock = socket.socket()
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind(('', 8080))
+ sock.listen(socket.SOMAXCONN)
+ ThreadedServer(maxThreads=10, jobClass=TestJob).run(sock)
diff --git a/lib/nulib/python/nulib/ext/flup/server/threadpool.py b/lib/nulib/python/nulib/ext/flup/server/threadpool.py
new file mode 100644
index 0000000..a61885d
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/flup/server/threadpool.py
@@ -0,0 +1,121 @@
+# Copyright (c) 2005 Allan Saddi
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $Id$
+
+__author__ = 'Allan Saddi '
+__version__ = '$Revision$'
+
+import sys
+import thread
+import threading
+
+class ThreadPool(object):
+ """
+ Thread pool that maintains the number of idle threads between
+ minSpare and maxSpare inclusive. By default, there is no limit on
+ the number of threads that can be started, but this can be controlled
+ by maxThreads.
+ """
+ def __init__(self, minSpare=1, maxSpare=5, maxThreads=sys.maxint):
+ self._minSpare = minSpare
+ self._maxSpare = maxSpare
+ self._maxThreads = max(minSpare, maxThreads)
+
+ self._lock = threading.Condition()
+ self._workQueue = []
+ self._idleCount = self._workerCount = maxSpare
+
+ # Start the minimum number of worker threads.
+ for i in range(maxSpare):
+ thread.start_new_thread(self._worker, ())
+
+ def addJob(self, job, allowQueuing=True):
+ """
+ Adds a job to the work queue. The job object should have a run()
+ method. If allowQueuing is True (the default), the job will be
+ added to the work queue regardless if there are any idle threads
+ ready. (The only way for there to be no idle threads is if maxThreads
+ is some reasonable, finite limit.)
+
+ Otherwise, if allowQueuing is False, and there are no more idle
+ threads, the job will not be queued.
+
+ Returns True if the job was queued, False otherwise.
+ """
+ self._lock.acquire()
+ try:
+ # Maintain minimum number of spares.
+ while self._idleCount < self._minSpare and \
+ self._workerCount < self._maxThreads:
+ self._workerCount += 1
+ self._idleCount += 1
+ thread.start_new_thread(self._worker, ())
+
+ # Hand off the job.
+ if self._idleCount or allowQueuing:
+ self._workQueue.append(job)
+ self._lock.notify()
+ return True
+ else:
+ return False
+ finally:
+ self._lock.release()
+
+ def _worker(self):
+ """
+ Worker thread routine. Waits for a job, executes it, repeat.
+ """
+ self._lock.acquire()
+ while True:
+ while not self._workQueue:
+ self._lock.wait()
+
+ # We have a job to do...
+ job = self._workQueue.pop(0)
+
+ assert self._idleCount > 0
+ self._idleCount -= 1
+
+ self._lock.release()
+
+ try:
+ job.run()
+ except:
+ # FIXME: This should really be reported somewhere.
+ # But we can't simply report it to stderr because of fcgi
+ pass
+
+ self._lock.acquire()
+
+ if self._idleCount == self._maxSpare:
+ break # NB: lock still held
+ self._idleCount += 1
+ assert self._idleCount <= self._maxSpare
+
+ # Die off...
+ assert self._workerCount > self._maxSpare
+ self._workerCount -= 1
+
+ self._lock.release()
diff --git a/lib/nulib/python/nulib/ext/web/__init__.py b/lib/nulib/python/nulib/ext/web/__init__.py
new file mode 100644
index 0000000..670dacb
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/__init__.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+"""web.py: makes web apps (http://webpy.org)"""
+
+from __future__ import generators
+
+__version__ = "0.38"
+__author__ = [
+ "Aaron Swartz ",
+ "Anand Chitipothu "
+]
+__license__ = "public domain"
+__contributors__ = "see http://webpy.org/changes"
+
+import utils, db, net, wsgi, http, webapi, httpserver, debugerror
+import template, form
+
+import session
+
+from utils import *
+from db import *
+from net import *
+from wsgi import *
+from http import *
+from webapi import *
+from httpserver import *
+from debugerror import *
+from application import *
+from browser import *
+try:
+ import webopenid as openid
+except ImportError:
+ pass # requires openid module
+
diff --git a/lib/nulib/python/nulib/ext/web/application.py b/lib/nulib/python/nulib/ext/web/application.py
new file mode 100644
index 0000000..668d11a
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/application.py
@@ -0,0 +1,740 @@
+"""
+Web application
+(from web.py)
+"""
+import webapi as web
+import webapi, wsgi, utils
+import debugerror
+import httpserver
+
+from utils import lstrips, safeunicode
+import sys
+
+import urllib
+import traceback
+import itertools
+import os
+import types
+from exceptions import SystemExit
+
+try:
+ import wsgiref.handlers
+except ImportError:
+ pass # don't break people with old Pythons
+
+__all__ = [
+ "application", "auto_application",
+ "subdir_application", "subdomain_application",
+ "loadhook", "unloadhook",
+ "autodelegate"
+]
+
+class application:
+ """
+ Application to delegate requests based on path.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self): return "hello"
+ >>>
+ >>> app.request("/hello").data
+ 'hello'
+ """
+ def __init__(self, mapping=(), fvars={}, autoreload=None):
+ if autoreload is None:
+ autoreload = web.config.get('debug', False)
+ self.init_mapping(mapping)
+ self.fvars = fvars
+ self.processors = []
+
+ self.add_processor(loadhook(self._load))
+ self.add_processor(unloadhook(self._unload))
+
+ if autoreload:
+ def main_module_name():
+ mod = sys.modules['__main__']
+ file = getattr(mod, '__file__', None) # make sure this works even from python interpreter
+ return file and os.path.splitext(os.path.basename(file))[0]
+
+ def modname(fvars):
+ """find name of the module name from fvars."""
+ file, name = fvars.get('__file__'), fvars.get('__name__')
+ if file is None or name is None:
+ return None
+
+ if name == '__main__':
+ # Since the __main__ module can't be reloaded, the module has
+ # to be imported using its file name.
+ name = main_module_name()
+ return name
+
+ mapping_name = utils.dictfind(fvars, mapping)
+ module_name = modname(fvars)
+
+ def reload_mapping():
+ """loadhook to reload mapping and fvars."""
+ mod = __import__(module_name, None, None, [''])
+ mapping = getattr(mod, mapping_name, None)
+ if mapping:
+ self.fvars = mod.__dict__
+ self.init_mapping(mapping)
+
+ self.add_processor(loadhook(Reloader()))
+ if mapping_name and module_name:
+ self.add_processor(loadhook(reload_mapping))
+
+            # load __main__ module using its filename, so that it can be reloaded.
+ if main_module_name() and '__main__' in sys.argv:
+ try:
+ __import__(main_module_name())
+ except ImportError:
+ pass
+
+ def _load(self):
+ web.ctx.app_stack.append(self)
+
+ def _unload(self):
+ web.ctx.app_stack = web.ctx.app_stack[:-1]
+
+ if web.ctx.app_stack:
+ # this is a sub-application, revert ctx to earlier state.
+ oldctx = web.ctx.get('_oldctx')
+ if oldctx:
+ web.ctx.home = oldctx.home
+ web.ctx.homepath = oldctx.homepath
+ web.ctx.path = oldctx.path
+ web.ctx.fullpath = oldctx.fullpath
+
+ def _cleanup(self):
+ # Threads can be recycled by WSGI servers.
+        # Clearing up all thread-local state to avoid interfering with subsequent requests.
+ utils.ThreadedDict.clear_all()
+
+ def init_mapping(self, mapping):
+ self.mapping = list(utils.group(mapping, 2))
+
+ def add_mapping(self, pattern, classname):
+ self.mapping.append((pattern, classname))
+
+ def add_processor(self, processor):
+ """
+ Adds a processor to the application.
+
+ >>> urls = ("/(.*)", "echo")
+ >>> app = application(urls, globals())
+ >>> class echo:
+ ... def GET(self, name): return name
+ ...
+ >>>
+ >>> def hello(handler): return "hello, " + handler()
+ ...
+ >>> app.add_processor(hello)
+ >>> app.request("/web.py").data
+ 'hello, web.py'
+ """
+ self.processors.append(processor)
+
+ def request(self, localpart='/', method='GET', data=None,
+ host="0.0.0.0:8080", headers=None, https=False, **kw):
+ """Makes request to this application for the specified path and method.
+ Response will be a storage object with data, status and headers.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self):
+ ... web.header('Content-Type', 'text/plain')
+ ... return "hello"
+ ...
+ >>> response = app.request("/hello")
+ >>> response.data
+ 'hello'
+ >>> response.status
+ '200 OK'
+ >>> response.headers['Content-Type']
+ 'text/plain'
+
+ To use https, use https=True.
+
+ >>> urls = ("/redirect", "redirect")
+ >>> app = application(urls, globals())
+ >>> class redirect:
+ ... def GET(self): raise web.seeother("/foo")
+ ...
+ >>> response = app.request("/redirect")
+ >>> response.headers['Location']
+ 'http://0.0.0.0:8080/foo'
+ >>> response = app.request("/redirect", https=True)
+ >>> response.headers['Location']
+ 'https://0.0.0.0:8080/foo'
+
+ The headers argument specifies HTTP headers as a mapping object
+ such as a dict.
+
+ >>> urls = ('/ua', 'uaprinter')
+ >>> class uaprinter:
+ ... def GET(self):
+ ... return 'your user-agent is ' + web.ctx.env['HTTP_USER_AGENT']
+ ...
+ >>> app = application(urls, globals())
+ >>> app.request('/ua', headers = {
+ ... 'User-Agent': 'a small jumping bean/1.0 (compatible)'
+ ... }).data
+ 'your user-agent is a small jumping bean/1.0 (compatible)'
+
+ """
+ path, maybe_query = urllib.splitquery(localpart)
+ query = maybe_query or ""
+
+ if 'env' in kw:
+ env = kw['env']
+ else:
+ env = {}
+ env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path, QUERY_STRING=query, HTTPS=str(https))
+ headers = headers or {}
+
+ for k, v in headers.items():
+ env['HTTP_' + k.upper().replace('-', '_')] = v
+
+ if 'HTTP_CONTENT_LENGTH' in env:
+ env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH')
+
+ if 'HTTP_CONTENT_TYPE' in env:
+ env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE')
+
+ if method not in ["HEAD", "GET"]:
+ data = data or ''
+ import StringIO
+ if isinstance(data, dict):
+ q = urllib.urlencode(data)
+ else:
+ q = data
+ env['wsgi.input'] = StringIO.StringIO(q)
+ if not env.get('CONTENT_TYPE', '').lower().startswith('multipart/') and 'CONTENT_LENGTH' not in env:
+ env['CONTENT_LENGTH'] = len(q)
+ response = web.storage()
+ def start_response(status, headers):
+ response.status = status
+ response.headers = dict(headers)
+ response.header_items = headers
+ response.data = "".join(self.wsgifunc()(env, start_response))
+ return response
+
+ def browser(self):
+ import browser
+ return browser.AppBrowser(self)
+
+ def handle(self):
+ fn, args = self._match(self.mapping, web.ctx.path)
+ return self._delegate(fn, self.fvars, args)
+
+ def handle_with_processors(self):
+ def process(processors):
+ try:
+ if processors:
+ p, processors = processors[0], processors[1:]
+ return p(lambda: process(processors))
+ else:
+ return self.handle()
+ except web.HTTPError:
+ raise
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ print >> web.debug, traceback.format_exc()
+ raise self.internalerror()
+
+        # processors must be applied in the reverse order. (??)
+ return process(self.processors)
+
+ def wsgifunc(self, *middleware):
+ """Returns a WSGI-compatible function for this application."""
+ def peep(iterator):
+ """Peeps into an iterator by doing an iteration
+ and returns an equivalent iterator.
+ """
+ # wsgi requires the headers first
+ # so we need to do an iteration
+ # and save the result for later
+ try:
+ firstchunk = iterator.next()
+ except StopIteration:
+ firstchunk = ''
+
+ return itertools.chain([firstchunk], iterator)
+
+ def is_generator(x): return x and hasattr(x, 'next')
+
+ def wsgi(env, start_resp):
+            # clear threadlocal to avoid interference from previous requests
+ self._cleanup()
+
+ self.load(env)
+ try:
+ # allow uppercase methods only
+ if web.ctx.method.upper() != web.ctx.method:
+ raise web.nomethod()
+
+ result = self.handle_with_processors()
+ if is_generator(result):
+ result = peep(result)
+ else:
+ result = [result]
+ except web.HTTPError, e:
+ result = [e.data]
+
+ result = web.safestr(iter(result))
+
+ status, headers = web.ctx.status, web.ctx.headers
+ start_resp(status, headers)
+
+ def cleanup():
+ self._cleanup()
+ yield '' # force this function to be a generator
+
+ return itertools.chain(result, cleanup())
+
+ for m in middleware:
+ wsgi = m(wsgi)
+
+ return wsgi
+
+ def run(self, *middleware):
+ """
+ Starts handling requests. If called in a CGI or FastCGI context, it will follow
+ that protocol. If called from the command line, it will start an HTTP
+ server on the port named in the first command line argument, or, if there
+ is no argument, on port 8080.
+
+ `middleware` is a list of WSGI middleware which is applied to the resulting WSGI
+ function.
+ """
+ return wsgi.runwsgi(self.wsgifunc(*middleware))
+
+ def stop(self):
+ """Stops the http server started by run.
+ """
+ if httpserver.server:
+ httpserver.server.stop()
+ httpserver.server = None
+
+ def cgirun(self, *middleware):
+ """
+ Return a CGI handler. This is mostly useful with Google App Engine.
+ There you can just do:
+
+ main = app.cgirun()
+ """
+ wsgiapp = self.wsgifunc(*middleware)
+
+ try:
+ from google.appengine.ext.webapp.util import run_wsgi_app
+ return run_wsgi_app(wsgiapp)
+ except ImportError:
+ # we're not running from within Google App Engine
+ return wsgiref.handlers.CGIHandler().run(wsgiapp)
+
+ def gaerun(self, *middleware):
+ """
+ Starts the program in a way that will work with Google app engine,
+ no matter which version you are using (2.5 / 2.7)
+
+ If it is 2.5, just normally start it with app.gaerun()
+
+ If it is 2.7, make sure to change the app.yaml handler to point to the
+ global variable that contains the result of app.gaerun()
+
+ For example:
+
+ in app.yaml (where code.py is where the main code is located)
+
+ handlers:
+ - url: /.*
+ script: code.app
+
+ Make sure that the app variable is globally accessible
+ """
+ wsgiapp = self.wsgifunc(*middleware)
+ try:
+ # check what version of python is running
+ version = sys.version_info[:2]
+ major = version[0]
+ minor = version[1]
+
+ if major != 2:
+ raise EnvironmentError("Google App Engine only supports python 2.5 and 2.7")
+
+ # if 2.7, return a function that can be run by gae
+ if minor == 7:
+ return wsgiapp
+ # if 2.5, use run_wsgi_app
+ elif minor == 5:
+ from google.appengine.ext.webapp.util import run_wsgi_app
+ return run_wsgi_app(wsgiapp)
+ else:
+ raise EnvironmentError("Not a supported platform, use python 2.5 or 2.7")
+ except ImportError:
+ return wsgiref.handlers.CGIHandler().run(wsgiapp)
+
+ def load(self, env):
+ """Initializes ctx using env."""
+ ctx = web.ctx
+ ctx.clear()
+ ctx.status = '200 OK'
+ ctx.headers = []
+ ctx.output = ''
+ ctx.environ = ctx.env = env
+ ctx.host = env.get('HTTP_HOST')
+
+ if env.get('wsgi.url_scheme') in ['http', 'https']:
+ ctx.protocol = env['wsgi.url_scheme']
+ elif env.get('HTTPS', '').lower() in ['on', 'true', '1']:
+ ctx.protocol = 'https'
+ else:
+ ctx.protocol = 'http'
+ ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]')
+ ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
+ ctx.home = ctx.homedomain + ctx.homepath
+ #@@ home is changed when the request is handled to a sub-application.
+ #@@ but the real home is required for doing absolute redirects.
+ ctx.realhome = ctx.home
+ ctx.ip = env.get('REMOTE_ADDR')
+ ctx.method = env.get('REQUEST_METHOD')
+ ctx.path = env.get('PATH_INFO') or ''
+ # http://trac.lighttpd.net/trac/ticket/406 requires:
+ if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
+ ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath)
+ # Apache and CherryPy webservers unquote the url but lighttpd doesn't.
+ # unquote explicitly for lighttpd to make ctx.path uniform across all servers.
+ ctx.path = urllib.unquote(ctx.path)
+
+ if env.get('QUERY_STRING'):
+ ctx.query = '?' + env.get('QUERY_STRING', '')
+ else:
+ ctx.query = ''
+
+ ctx.fullpath = ctx.path + ctx.query
+
+ for k, v in ctx.iteritems():
+ # convert all string values to unicode values and replace
+ # malformed data with a suitable replacement marker.
+ if isinstance(v, str):
+ ctx[k] = v.decode('utf-8', 'replace')
+
+ # status must always be str
+ ctx.status = '200 OK'
+
+ ctx.app_stack = []
+
+ _handler_configurator = None
+
+ def set_handler_configurator(self, handler_configurator):
+ self._handler_configurator = handler_configurator
+
+ def configure_handler(self, handler):
+ if self._handler_configurator is not None:
+ self._handler_configurator(handler)
+
+ def _delegate(self, f, fvars, args=[]):
+ def handle_class(cls):
+ meth = web.ctx.method
+ if meth == 'HEAD' and not hasattr(cls, meth):
+ meth = 'GET'
+ if not hasattr(cls, meth):
+ raise web.nomethod(cls)
+ handler = cls()
+ self.configure_handler(handler)
+ tocall = getattr(handler, meth)
+ return tocall(*args)
+
+ def is_class(o): return isinstance(o, (types.ClassType, type))
+
+ if f is None:
+ raise web.notfound()
+ elif isinstance(f, application):
+ return f.handle_with_processors()
+ elif is_class(f):
+ return handle_class(f)
+ elif isinstance(f, basestring):
+ if f.startswith('redirect '):
+ url = f.split(' ', 1)[1]
+ if web.ctx.method == "GET":
+ x = web.ctx.env.get('QUERY_STRING', '')
+ if x:
+ url += '?' + x
+ raise web.redirect(url)
+ elif '.' in f:
+ mod, cls = f.rsplit('.', 1)
+ mod = __import__(mod, None, None, [''])
+ cls = getattr(mod, cls)
+ else:
+ cls = fvars[f]
+ return handle_class(cls)
+ elif hasattr(f, '__call__'):
+ return f()
+ else:
+ return web.notfound()
+
+ def _match(self, mapping, value):
+ for pat, what in mapping:
+ if isinstance(what, application):
+ if value.startswith(pat):
+ f = lambda: self._delegate_sub_application(pat, what)
+ return f, None
+ else:
+ continue
+ elif isinstance(what, basestring):
+ what, result = utils.re_subm('^' + pat + '$', what, value)
+ else:
+ result = utils.re_compile('^' + pat + '$').match(value)
+
+ if result: # it's a match
+ return what, [x for x in result.groups()]
+ return None, None
+
+ def _delegate_sub_application(self, dir, app):
+        """Delegates the request to sub application `app` rooted at the directory `dir`.
+ The home, homepath, path and fullpath values in web.ctx are updated to mimic request
+ to the subapp and are restored after it is handled.
+
+ @@Any issues with when used with yield?
+ """
+ web.ctx._oldctx = web.storage(web.ctx)
+ web.ctx.home += dir
+ web.ctx.homepath += dir
+ web.ctx.path = web.ctx.path[len(dir):]
+ web.ctx.fullpath = web.ctx.fullpath[len(dir):]
+ return app.handle_with_processors()
+
+ def get_parent_app(self):
+ if self in web.ctx.app_stack:
+ index = web.ctx.app_stack.index(self)
+ if index > 0:
+ return web.ctx.app_stack[index-1]
+
+ def notfound(self):
+ """Returns HTTPError with '404 not found' message"""
+ parent = self.get_parent_app()
+ if parent:
+ return parent.notfound()
+ else:
+ return web._NotFound()
+
+ def internalerror(self):
+ """Returns HTTPError with '500 internal error' message"""
+ parent = self.get_parent_app()
+ if parent:
+ return parent.internalerror()
+ elif web.config.get('debug'):
+ import debugerror
+ return debugerror.debugerror()
+ else:
+ return web._InternalError()
+
+class auto_application(application):
+ """Application similar to `application` but urls are constructed
+    automatically using metaclass.
+
+ >>> app = auto_application()
+ >>> class hello(app.page):
+ ... def GET(self): return "hello, world"
+ ...
+ >>> class foo(app.page):
+ ... path = '/foo/.*'
+ ... def GET(self): return "foo"
+ >>> app.request("/hello").data
+ 'hello, world'
+ >>> app.request('/foo/bar').data
+ 'foo'
+ """
+ def __init__(self):
+ application.__init__(self)
+
+ class metapage(type):
+ def __init__(klass, name, bases, attrs):
+ type.__init__(klass, name, bases, attrs)
+ path = attrs.get('path', '/' + name)
+
+ # path can be specified as None to ignore that class
+            # typically required to create an abstract base class.
+ if path is not None:
+ self.add_mapping(path, klass)
+
+ class page:
+ path = None
+ __metaclass__ = metapage
+
+ self.page = page
+
+# The application class already has the required functionality of subdir_application
+subdir_application = application
+
+class subdomain_application(application):
+ """
+ Application to delegate requests based on the host.
+
+ >>> urls = ("/hello", "hello")
+ >>> app = application(urls, globals())
+ >>> class hello:
+ ... def GET(self): return "hello"
+ >>>
+ >>> mapping = (r"hello\.example\.com", app)
+ >>> app2 = subdomain_application(mapping)
+ >>> app2.request("/hello", host="hello.example.com").data
+ 'hello'
+ >>> response = app2.request("/hello", host="something.example.com")
+ >>> response.status
+ '404 Not Found'
+ >>> response.data
+ 'not found'
+ """
+ def handle(self):
+ host = web.ctx.host.split(':')[0] #strip port
+ fn, args = self._match(self.mapping, host)
+ return self._delegate(fn, self.fvars, args)
+
+ def _match(self, mapping, value):
+ for pat, what in mapping:
+ if isinstance(what, basestring):
+ what, result = utils.re_subm('^' + pat + '$', what, value)
+ else:
+ result = utils.re_compile('^' + pat + '$').match(value)
+
+ if result: # it's a match
+ return what, [x for x in result.groups()]
+ return None, None
+
+def loadhook(h):
+ """
+ Converts a load hook into an application processor.
+
+ >>> app = auto_application()
+ >>> def f(): "something done before handling request"
+ ...
+ >>> app.add_processor(loadhook(f))
+ """
+ def processor(handler):
+ h()
+ return handler()
+
+ return processor
+
+def unloadhook(h):
+ """
+ Converts an unload hook into an application processor.
+
+ >>> app = auto_application()
+ >>> def f(): "something done after handling request"
+ ...
+ >>> app.add_processor(unloadhook(f))
+ """
+ def processor(handler):
+ try:
+ result = handler()
+ is_generator = result and hasattr(result, 'next')
+ except:
+ # run the hook even when handler raises some exception
+ h()
+ raise
+
+ if is_generator:
+ return wrap(result)
+ else:
+ h()
+ return result
+
+ def wrap(result):
+ def next():
+ try:
+ return result.next()
+ except:
+                # call the hook at the end of the iterator
+ h()
+ raise
+
+ result = iter(result)
+ while True:
+ yield next()
+
+ return processor
+
+def autodelegate(prefix=''):
+ """
+ Returns a method that takes one argument and calls the method named prefix+arg,
+ calling `notfound()` if there isn't one. Example:
+
+ urls = ('/prefs/(.*)', 'prefs')
+
+ class prefs:
+ GET = autodelegate('GET_')
+ def GET_password(self): pass
+ def GET_privacy(self): pass
+
+ `GET_password` would get called for `/prefs/password` while `GET_privacy` for
+ `GET_privacy` gets called for `/prefs/privacy`.
+
+ If a user visits `/prefs/password/change` then `GET_password(self, '/change')`
+ is called.
+ """
+ def internal(self, arg):
+ if '/' in arg:
+ first, rest = arg.split('/', 1)
+ func = prefix + first
+ args = ['/' + rest]
+ else:
+ func = prefix + arg
+ args = []
+
+ if hasattr(self, func):
+ try:
+ return getattr(self, func)(*args)
+ except TypeError:
+ raise web.notfound()
+ else:
+ raise web.notfound()
+ return internal
+
+class Reloader:
+ """Checks to see if any loaded modules have changed on disk and,
+ if so, reloads them.
+ """
+
+ """File suffix of compiled modules."""
+ if sys.platform.startswith('java'):
+ SUFFIX = '$py.class'
+ else:
+ SUFFIX = '.pyc'
+
+ def __init__(self):
+ self.mtimes = {}
+
+ def __call__(self):
+ for mod in sys.modules.values():
+ self.check(mod)
+
+ def check(self, mod):
+ # jython registers java packages as modules but they either
+ # don't have a __file__ attribute or its value is None
+ if not (mod and hasattr(mod, '__file__') and mod.__file__):
+ return
+
+ try:
+ mtime = os.stat(mod.__file__).st_mtime
+ except (OSError, IOError):
+ return
+ if mod.__file__.endswith(self.__class__.SUFFIX) and os.path.exists(mod.__file__[:-1]):
+ mtime = max(os.stat(mod.__file__[:-1]).st_mtime, mtime)
+
+ if mod not in self.mtimes:
+ self.mtimes[mod] = mtime
+ elif self.mtimes[mod] < mtime:
+ try:
+ reload(mod)
+ self.mtimes[mod] = mtime
+ except ImportError:
+ pass
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/browser.py b/lib/nulib/python/nulib/ext/web/browser.py
new file mode 100644
index 0000000..66d859e
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/browser.py
@@ -0,0 +1,236 @@
+"""Browser to test web applications.
+(from web.py)
+"""
+from utils import re_compile
+from net import htmlunquote
+
+import httplib, urllib, urllib2
+import copy
+from StringIO import StringIO
+
+DEBUG = False
+
+__all__ = [
+ "BrowserError",
+ "Browser", "AppBrowser",
+ "AppHandler"
+]
+
+class BrowserError(Exception):
+ pass
+
+class Browser:
+ def __init__(self):
+ import cookielib
+ self.cookiejar = cookielib.CookieJar()
+ self._cookie_processor = urllib2.HTTPCookieProcessor(self.cookiejar)
+ self.form = None
+
+ self.url = "http://0.0.0.0:8080/"
+ self.path = "/"
+
+ self.status = None
+ self.data = None
+ self._response = None
+ self._forms = None
+
+ def reset(self):
+ """Clears all cookies and history."""
+ self.cookiejar.clear()
+
+ def build_opener(self):
+ """Builds the opener using urllib2.build_opener.
+        Subclasses can override this function to provide custom openers.
+ """
+ return urllib2.build_opener()
+
+ def do_request(self, req):
+ if DEBUG:
+ print 'requesting', req.get_method(), req.get_full_url()
+ opener = self.build_opener()
+ opener.add_handler(self._cookie_processor)
+ try:
+ self._response = opener.open(req)
+ except urllib2.HTTPError, e:
+ self._response = e
+
+ self.url = self._response.geturl()
+ self.path = urllib2.Request(self.url).get_selector()
+ self.data = self._response.read()
+ self.status = self._response.code
+ self._forms = None
+ self.form = None
+ return self.get_response()
+
+ def open(self, url, data=None, headers={}):
+ """Opens the specified url."""
+ url = urllib.basejoin(self.url, url)
+ req = urllib2.Request(url, data, headers)
+ return self.do_request(req)
+
+ def show(self):
+ """Opens the current page in real web browser."""
+ f = open('page.html', 'w')
+ f.write(self.data)
+ f.close()
+
+ import webbrowser, os
+ url = 'file://' + os.path.abspath('page.html')
+ webbrowser.open(url)
+
+ def get_response(self):
+ """Returns a copy of the current response."""
+ return urllib.addinfourl(StringIO(self.data), self._response.info(), self._response.geturl())
+
+ def get_soup(self):
+ """Returns beautiful soup of the current document."""
+ import BeautifulSoup
+ return BeautifulSoup.BeautifulSoup(self.data)
+
+ def get_text(self, e=None):
+ """Returns content of e or the current document as plain text."""
+ e = e or self.get_soup()
+ return ''.join([htmlunquote(c) for c in e.recursiveChildGenerator() if isinstance(c, unicode)])
+
+ def _get_links(self):
+ soup = self.get_soup()
+ return [a for a in soup.findAll(name='a')]
+
+ def get_links(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ """Returns all links in the document."""
+ return self._filter_links(self._get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+
+ def follow_link(self, link=None, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ if link is None:
+ links = self._filter_links(self.get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+ link = links and links[0]
+
+ if link:
+ return self.open(link['href'])
+ else:
+ raise BrowserError("No link found")
+
+ def find_link(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
+ links = self._filter_links(self.get_links(),
+ text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
+ return links and links[0] or None
+
+ def _filter_links(self, links,
+ text=None, text_regex=None,
+ url=None, url_regex=None,
+ predicate=None):
+ predicates = []
+ if text is not None:
+ predicates.append(lambda link: link.string == text)
+ if text_regex is not None:
+ predicates.append(lambda link: re_compile(text_regex).search(link.string or ''))
+ if url is not None:
+ predicates.append(lambda link: link.get('href') == url)
+ if url_regex is not None:
+ predicates.append(lambda link: re_compile(url_regex).search(link.get('href', '')))
+ if predicate:
+ predicate.append(predicate)
+
+ def f(link):
+ for p in predicates:
+ if not p(link):
+ return False
+ return True
+
+ return [link for link in links if f(link)]
+
+ def get_forms(self):
+ """Returns all forms in the current document.
+ The returned form objects implement the ClientForm.HTMLForm interface.
+ """
+ if self._forms is None:
+ import ClientForm
+ self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False)
+ return self._forms
+
+ def select_form(self, name=None, predicate=None, index=0):
+ """Selects the specified form."""
+ forms = self.get_forms()
+
+ if name is not None:
+ forms = [f for f in forms if f.name == name]
+ if predicate:
+ forms = [f for f in forms if predicate(f)]
+
+ if forms:
+ self.form = forms[index]
+ return self.form
+ else:
+ raise BrowserError("No form selected.")
+
+ def submit(self, **kw):
+ """submits the currently selected form."""
+ if self.form is None:
+ raise BrowserError("No form selected.")
+ req = self.form.click(**kw)
+ return self.do_request(req)
+
+ def __getitem__(self, key):
+ return self.form[key]
+
+ def __setitem__(self, key, value):
+ self.form[key] = value
+
+class AppBrowser(Browser):
+ """Browser interface to test web.py apps.
+
+ b = AppBrowser(app)
+ b.open('/')
+ b.follow_link(text='Login')
+
+ b.select_form(name='login')
+ b['username'] = 'joe'
+ b['password'] = 'secret'
+ b.submit()
+
+ assert b.path == '/'
+ assert 'Welcome joe' in b.get_text()
+ """
+ def __init__(self, app):
+ Browser.__init__(self)
+ self.app = app
+
+ def build_opener(self):
+ return urllib2.build_opener(AppHandler(self.app))
+
+class AppHandler(urllib2.HTTPHandler):
+ """urllib2 handler to handle requests using web.py application."""
+ handler_order = 100
+
+ def __init__(self, app):
+ self.app = app
+
+ def http_open(self, req):
+ result = self.app.request(
+ localpart=req.get_selector(),
+ method=req.get_method(),
+ host=req.get_host(),
+ data=req.get_data(),
+ headers=dict(req.header_items()),
+ https=req.get_type() == "https"
+ )
+ return self._make_response(result, req.get_full_url())
+
+ def https_open(self, req):
+ return self.http_open(req)
+
+ try:
+ https_request = urllib2.HTTPHandler.do_request_
+ except AttributeError:
+ # for python 2.3
+ pass
+
+ def _make_response(self, result, url):
+ data = "\r\n".join(["%s: %s" % (k, v) for k, v in result.header_items])
+ headers = httplib.HTTPMessage(StringIO(data))
+ response = urllib.addinfourl(StringIO(result.data), headers, url)
+ code, msg = result.status.split(None, 1)
+ response.code, response.msg = int(code), msg
+ return response
diff --git a/lib/nulib/python/nulib/ext/web/contrib/__init__.py b/lib/nulib/python/nulib/ext/web/contrib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/lib/nulib/python/nulib/ext/web/contrib/template.py b/lib/nulib/python/nulib/ext/web/contrib/template.py
new file mode 100644
index 0000000..7495d39
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/contrib/template.py
@@ -0,0 +1,131 @@
+"""
+Interface to various templating engines.
+"""
+import os.path
+
+__all__ = [
+ "render_cheetah", "render_genshi", "render_mako",
+ "cache",
+]
+
+class render_cheetah:
+    """Rendering interface to Cheetah Templates.
+
+    Example:
+
+        render = render_cheetah('templates')
+        render.hello(name="cheetah")
+    """
+    def __init__(self, path):
+        # give error if Cheetah is not installed
+        from Cheetah.Template import Template
+        self.path = path
+
+    def __getattr__(self, name):
+        # Attribute access maps to a template file: render.hello -> hello.html.
+        from Cheetah.Template import Template
+        path = os.path.join(self.path, name + ".html")
+
+        def template(**kw):
+            # Keyword arguments become the template's search list (its variables).
+            t = Template(file=path, searchList=[kw])
+            return t.respond()
+
+        return template
+
+class render_genshi:
+    """Rendering interface genshi templates.
+    Example:
+
+    for xml/html templates.
+
+        render = render_genshi(['templates/'])
+        render.hello(name='genshi')
+
+    For text templates:
+
+        render = render_genshi(['templates/'], type='text')
+        render.hello(name='genshi')
+    """
+
+    def __init__(self, *a, **kwargs):
+        from genshi.template import TemplateLoader
+
+        # 'type' is our own option (e.g. "text"); pop it before passing the
+        # remaining arguments through to genshi's TemplateLoader.
+        self._type = kwargs.pop('type', None)
+        self._loader = TemplateLoader(*a, **kwargs)
+
+    def __getattr__(self, name):
+        # Assuming all templates are html
+        path = name + ".html"
+
+        if self._type == "text":
+            from genshi.template import TextTemplate
+            cls = TextTemplate
+            type = "text"
+        else:
+            # None lets the loader pick its default (markup) template class.
+            cls = None
+            type = None
+
+        t = self._loader.load(path, cls=cls)
+        def template(**kw):
+            stream = t.generate(**kw)
+            if type:
+                return stream.render(type)
+            else:
+                return stream.render()
+        return template
+
+class render_jinja:
+    """Rendering interface to Jinja2 Templates
+
+    Example:
+
+        render= render_jinja('templates')
+        render.hello(name='jinja2')
+    """
+    def __init__(self, *a, **kwargs):
+        # 'extensions' and 'globals' configure the jinja2 Environment;
+        # everything else goes to FileSystemLoader.
+        extensions = kwargs.pop('extensions', [])
+        globals = kwargs.pop('globals', {})
+
+        from jinja2 import Environment,FileSystemLoader
+        self._lookup = Environment(loader=FileSystemLoader(*a, **kwargs), extensions=extensions)
+        self._lookup.globals.update(globals)
+
+    def __getattr__(self, name):
+        # Assuming all templates end with .html
+        path = name + '.html'
+        t = self._lookup.get_template(path)
+        # Return the bound render method; caller invokes it with template vars.
+        return t.render
+
+class render_mako:
+    """Rendering interface to Mako Templates.
+
+    Example:
+
+        render = render_mako(directories=['templates'])
+        render.hello(name="mako")
+    """
+    def __init__(self, *a, **kwargs):
+        # All arguments are forwarded to mako's TemplateLookup.
+        from mako.lookup import TemplateLookup
+        self._lookup = TemplateLookup(*a, **kwargs)
+
+    def __getattr__(self, name):
+        # Assuming all templates are html
+        path = name + ".html"
+        t = self._lookup.get_template(path)
+        # Return the bound render method; caller invokes it with template vars.
+        return t.render
+
+class cache:
+    """Cache for any rendering interface.
+
+    Example:
+
+        render = cache(render_cheetah("templates/"))
+        render.hello(name='cache')
+    """
+    def __init__(self, render):
+        self._render = render
+        # Maps template name -> the callable produced by the wrapped renderer,
+        # so each template is looked up/compiled only once.
+        self._cache = {}
+
+    def __getattr__(self, name):
+        # __getattr__ is only invoked for names not already set on the
+        # instance, so _render/_cache themselves never recurse here.
+        if name not in self._cache:
+            self._cache[name] = getattr(self._render, name)
+        return self._cache[name]
diff --git a/lib/nulib/python/nulib/ext/web/db.py b/lib/nulib/python/nulib/ext/web/db.py
new file mode 100644
index 0000000..e52a76d
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/db.py
@@ -0,0 +1,1281 @@
+"""
+Database API
+(part of web.py)
+"""
+
+__all__ = [
+ "UnknownParamstyle", "UnknownDB", "TransactionError",
+ "sqllist", "sqlors", "reparam", "sqlquote",
+ "SQLQuery", "SQLParam", "sqlparam",
+ "SQLLiteral", "sqlliteral",
+ "database", 'DB',
+]
+
+import time, os, urllib, urlparse
+try:
+ import datetime
+except ImportError:
+ datetime = None
+
+try: set
+except NameError:
+ from sets import Set as set
+
+from utils import threadeddict, storage, iters, iterbetter, safestr, safeunicode
+
+try:
+ # db module can work independent of web.py
+ from webapi import debug, config
+except:
+ import sys
+ debug = sys.stderr
+ config = storage()
+
+class UnknownDB(Exception):
+    """raised for unsupported dbms"""
+    pass
+
+class _ItplError(ValueError):
+    # Raised by the string-interpolation parser (_interpolate) when a
+    # $-expression is left unterminated.
+    def __init__(self, text, pos):
+        ValueError.__init__(self)
+        self.text = text  # the offending source string
+        self.pos = pos    # character offset where parsing failed
+    def __str__(self):
+        return "unfinished expression in %s at char %d" % (
+            repr(self.text), self.pos)
+
+class TransactionError(Exception): pass  # raised for errors in transaction handling
+
+class UnknownParamstyle(Exception):
+    """
+    raised for unsupported db paramstyles
+
+    (currently supported: qmark, numeric, format, pyformat)
+    """
+    pass
+
+class SQLParam(object):
+    """
+    Parameter in SQLQuery.
+
+    >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
+    >>> q
+
+    >>> q.query()
+    'SELECT * FROM test WHERE name=%s'
+    >>> q.values()
+    ['joe']
+    """
+    __slots__ = ["value"]
+
+    def __init__(self, value):
+        self.value = value
+
+    def get_marker(self, paramstyle='pyformat'):
+        # Return the DB-API placeholder token for the given paramstyle.
+        if paramstyle == 'qmark':
+            return '?'
+        elif paramstyle == 'numeric':
+            return ':1'
+        elif paramstyle is None or paramstyle in ['format', 'pyformat']:
+            return '%s'
+        raise UnknownParamstyle, paramstyle
+
+    def sqlquery(self):
+        # Wrap this single parameter into a one-item SQLQuery.
+        return SQLQuery([self])
+
+    # Adding a SQLParam to a string/SQLQuery yields a SQLQuery in both orders.
+    def __add__(self, other):
+        return self.sqlquery() + other
+
+    def __radd__(self, other):
+        return other + self.sqlquery()
+
+    def __str__(self):
+        return str(self.value)
+
+    def __repr__(self):
+        return '' % repr(self.value)
+
+sqlparam = SQLParam  # lowercase alias, used throughout this module
+
+class SQLQuery(object):
+    """
+    You can pass this sort of thing as a clause in any db function.
+    Otherwise, you can pass a dictionary to the keyword argument `vars`
+    and the function will call reparam for you.
+
+    Internally, consists of `items`, which is a list of strings and
+    SQLParams, which get concatenated to produce the actual query.
+    """
+    __slots__ = ["items"]
+
+    # tested in sqlquote's docstring
+    def __init__(self, items=None):
+        r"""Creates a new SQLQuery.
+
+        >>> SQLQuery("x")
+
+        >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
+        >>> q
+
+        >>> q.query(), q.values()
+        ('SELECT * FROM test WHERE x=%s', [1])
+        >>> SQLQuery(SQLParam(1))
+
+        """
+        # Accept None, a list, a single SQLParam/SQLQuery, or any scalar.
+        if items is None:
+            self.items = []
+        elif isinstance(items, list):
+            self.items = items
+        elif isinstance(items, SQLParam):
+            self.items = [items]
+        elif isinstance(items, SQLQuery):
+            self.items = list(items.items)
+        else:
+            self.items = [items]
+
+        # Take care of SQLLiterals: unwrap them so they are emitted verbatim
+        # rather than as bound parameters.
+        for i, item in enumerate(self.items):
+            if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
+                self.items[i] = item.value.v
+
+    def append(self, value):
+        self.items.append(value)
+
+    def __add__(self, other):
+        if isinstance(other, basestring):
+            items = [other]
+        elif isinstance(other, SQLQuery):
+            items = other.items
+        else:
+            return NotImplemented
+        return SQLQuery(self.items + items)
+
+    def __radd__(self, other):
+        # Only plain strings can be prepended; anything else is unsupported.
+        if isinstance(other, basestring):
+            items = [other]
+        else:
+            return NotImplemented
+
+        return SQLQuery(items + self.items)
+
+    def __iadd__(self, other):
+        # In-place concatenation mutates self.items.
+        if isinstance(other, (basestring, SQLParam)):
+            self.items.append(other)
+        elif isinstance(other, SQLQuery):
+            self.items.extend(other.items)
+        else:
+            return NotImplemented
+        return self
+
+    def __len__(self):
+        return len(self.query())
+
+    def query(self, paramstyle=None):
+        """
+        Returns the query part of the sql query.
+        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
+        >>> q.query()
+        'SELECT * FROM test WHERE name=%s'
+        >>> q.query(paramstyle='qmark')
+        'SELECT * FROM test WHERE name=?'
+        """
+        s = []
+        for x in self.items:
+            if isinstance(x, SQLParam):
+                x = x.get_marker(paramstyle)
+                s.append(safestr(x))
+            else:
+                x = safestr(x)
+                # automatically escape % characters in the query
+                # For backward compatibility, ignore escaping when the query looks already escaped
+                if paramstyle in ['format', 'pyformat']:
+                    if '%' in x and '%%' not in x:
+                        x = x.replace('%', '%%')
+                s.append(x)
+        return "".join(s)
+
+    def values(self):
+        """
+        Returns the values of the parameters used in the sql query.
+        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
+        >>> q.values()
+        ['joe']
+        """
+        return [i.value for i in self.items if isinstance(i, SQLParam)]
+
+    def join(items, sep=' ', prefix=None, suffix=None, target=None):
+        """
+        Joins multiple queries.
+
+        >>> SQLQuery.join(['a', 'b'], ', ')
+
+
+        Optionally, prefix and suffix arguments can be provided.
+
+        >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
+
+
+        If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
+        """
+        if target is None:
+            target = SQLQuery()
+
+        target_items = target.items
+
+        if prefix:
+            target_items.append(prefix)
+
+        for i, item in enumerate(items):
+            if i != 0:
+                target_items.append(sep)
+            # SQLQuery items are flattened in; everything else is appended as-is.
+            if isinstance(item, SQLQuery):
+                target_items.extend(item.items)
+            else:
+                target_items.append(item)
+
+        if suffix:
+            target_items.append(suffix)
+        return target
+
+    join = staticmethod(join)
+
+    def _str(self):
+        # Best-effort interpolation of values into the query, for display only;
+        # falls back to the raw query if the format expansion fails.
+        try:
+            return self.query() % tuple([sqlify(x) for x in self.values()])
+        except (ValueError, TypeError):
+            return self.query()
+
+    def __str__(self):
+        return safestr(self._str())
+
+    def __unicode__(self):
+        return safeunicode(self._str())
+
+    def __repr__(self):
+        return '' % repr(str(self))
+
+class SQLLiteral:
+    """
+    Protects a string from `sqlquote`.
+
+    >>> sqlquote('NOW()')
+
+    >>> sqlquote(SQLLiteral('NOW()'))
+
+    """
+    def __init__(self, v):
+        # v: the raw SQL fragment to emit verbatim (never parameterized).
+        self.v = v
+
+    def __repr__(self):
+        return self.v
+
+sqlliteral = SQLLiteral  # lowercase alias
+
+def _sqllist(values):
+    """
+    Builds a parenthesized, parameterized SQL tuple, e.g. for IN clauses.
+
+    >>> _sqllist([1, 2, 3])
+
+    """
+    items = []
+    items.append('(')
+    for i, v in enumerate(values):
+        if i != 0:
+            items.append(', ')
+        # Each value becomes a bound parameter, not inline SQL.
+        items.append(sqlparam(v))
+    items.append(')')
+    return SQLQuery(items)
+
+def reparam(string_, dictionary):
+    """
+    Takes a string and a dictionary and interpolates the string
+    using values from the dictionary. Returns an `SQLQuery` for the result.
+
+    >>> reparam("s = $s", dict(s=True))
+
+    >>> reparam("s IN $s", dict(s=[1, 2]))
+
+    """
+    dictionary = dictionary.copy() # eval mucks with it
+    # disable builtins to avoid risk for remote code execution.
+    vals = []
+    result = []
+    # _interpolate (defined elsewhere in this module) yields (live, chunk)
+    # pairs: live chunks are $-expressions to evaluate, dead chunks are
+    # literal SQL text.
+    dictionary['__builtins__'] = object()
+    for live, chunk in _interpolate(string_):
+        if live:
+            # NOTE(review): eval of user-supplied template chunks — the $
+            # expressions must come from trusted query strings, not user input.
+            v = eval(chunk, dictionary)
+            result.append(sqlquote(v))
+        else:
+            result.append(chunk)
+    return SQLQuery.join(result, '')
+
+def sqlify(obj):
+    """
+    converts `obj` to its proper SQL version
+
+    >>> sqlify(None)
+    'NULL'
+    >>> sqlify(True)
+    "'t'"
+    >>> sqlify(3)
+    '3'
+    """
+    # because `1 == True and hash(1) == hash(True)`
+    # we have to do this the hard way...
+
+    if obj is None:
+        return 'NULL'
+    elif obj is True:
+        return "'t'"
+    elif obj is False:
+        return "'f'"
+    elif isinstance(obj, long):
+        return str(obj)
+    elif datetime and isinstance(obj, datetime.datetime):
+        return repr(obj.isoformat())
+    else:
+        # Fall back to repr(); unicode is UTF-8 encoded first so the repr
+        # is a plain byte string.
+        if isinstance(obj, unicode): obj = obj.encode('utf8')
+        return repr(obj)
+
+def sqllist(lst):
+    """
+    Converts the arguments for use in something like a WHERE clause.
+
+    >>> sqllist(['a', 'b'])
+    'a, b'
+    >>> sqllist('a')
+    'a'
+    >>> sqllist(u'abc')
+    u'abc'
+    """
+    # A plain string is passed through unchanged; a sequence is comma-joined.
+    if isinstance(lst, basestring):
+        return lst
+    else:
+        return ', '.join(lst)
+
+def sqlors(left, lst):
+    """
+    `left` is a SQL clause like `tablename.arg = `
+    and `lst` is a list of values. Returns a reparam-style
+    pair featuring the SQL that ORs together the clause
+    for each item in the lst.
+
+    >>> sqlors('foo = ', [])
+
+    >>> sqlors('foo = ', [1])
+
+    >>> sqlors('foo = ', 1)
+
+    >>> sqlors('foo = ', [1,2,3])
+
+    """
+    if isinstance(lst, iters):
+        lst = list(lst)
+        ln = len(lst)
+        # Empty list can never match: emit an always-false clause.
+        if ln == 0:
+            return SQLQuery("1=2")
+        # Single element degrades to a simple equality below.
+        if ln == 1:
+            lst = lst[0]
+
+    if isinstance(lst, iters):
+        # Trailing '1=2' absorbs the final dangling ' OR '.
+        return SQLQuery(['('] +
+          sum([[left, sqlparam(x), ' OR '] for x in lst], []) +
+          ['1=2)']
+        )
+    else:
+        return left + sqlparam(lst)
+
+def sqlwhere(dictionary, grouping=' AND '):
+    """
+    Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.
+
+    >>> sqlwhere({'cust_id': 2, 'order_id':3})
+
+    >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
+
+    >>> sqlwhere({'a': 'a', 'b': 'b'}).query()
+    'a = %s AND b = %s'
+    """
+    # Each value becomes a bound parameter; keys are emitted as raw SQL,
+    # so keys must be trusted column names.
+    return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)
+
+def sqlquote(a):
+    """
+    Ensures `a` is quoted properly for use in a SQL query.
+
+    >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
+
+    >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
+
+    """
+    # Lists become a parenthesized parameter tuple; scalars a single parameter.
+    if isinstance(a, list):
+        return _sqllist(a)
+    else:
+        return sqlparam(a).sqlquery()
+
+class Transaction:
+    """Database transaction."""
+    def __init__(self, ctx):
+        # ctx: the per-thread DB context (holds the connection and the
+        # stack of currently-open transactions).
+        self.ctx = ctx
+        # Depth of this transaction in the stack; 0 means top-level.
+        self.transaction_count = transaction_count = len(ctx.transactions)
+
+        # The engine classes below close over `ctx`/`transaction_count`
+        # from this scope rather than taking them as arguments.
+        class transaction_engine:
+            """Transaction Engine used in top level transactions."""
+            def do_transact(self):
+                # Flush any pending work but keep the connection loaded.
+                ctx.commit(unload=False)
+
+            def do_commit(self):
+                ctx.commit()
+
+            def do_rollback(self):
+                ctx.rollback()
+
+        class subtransaction_engine:
+            """Transaction Engine used in sub transactions."""
+            def query(self, q):
+                # Savepoint names embed the nesting depth to stay unique.
+                db_cursor = ctx.db.cursor()
+                ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
+
+            def do_transact(self):
+                self.query('SAVEPOINT webpy_sp_%s')
+
+            def do_commit(self):
+                self.query('RELEASE SAVEPOINT webpy_sp_%s')
+
+            def do_rollback(self):
+                self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
+
+        class dummy_engine:
+            """Transaction Engine used instead of subtransaction_engine
+            when sub transactions are not supported."""
+            do_transact = do_commit = do_rollback = lambda self: None
+
+        if self.transaction_count:
+            # nested transactions are not supported in some databases
+            if self.ctx.get('ignore_nested_transactions'):
+                self.engine = dummy_engine()
+            else:
+                self.engine = subtransaction_engine()
+        else:
+            self.engine = transaction_engine()
+
+        self.engine.do_transact()
+        self.ctx.transactions.append(self)
+
+    # Context-manager protocol: commit on clean exit, rollback on exception.
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exctype, excvalue, traceback):
+        if exctype is not None:
+            self.rollback()
+        else:
+            self.commit()
+
+    def commit(self):
+        # Only act if this transaction is still open (not already popped).
+        if len(self.ctx.transactions) > self.transaction_count:
+            self.engine.do_commit()
+            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
+
+    def rollback(self):
+        if len(self.ctx.transactions) > self.transaction_count:
+            self.engine.do_rollback()
+            self.ctx.transactions = self.ctx.transactions[:self.transaction_count]
+
+class DB:
+    """Database"""
+    def __init__(self, db_module, keywords):
+        """Creates a database.
+        """
+        # some DB implementations take optional parameter `driver` to use a specific driver module
+        # but it should not be passed to connect
+        keywords.pop('driver', None)
+
+        self.db_module = db_module
+        self.keywords = keywords
+
+        # Per-thread context: connection, transaction stack, query counter.
+        self._ctx = threadeddict()
+        # flag to enable/disable printing queries
+        self.printing = config.get('debug_sql', config.get('debug', False))
+        self.supports_multiple_insert = False
+
+        try:
+            import DBUtils
+            # enable pooling if DBUtils module is available.
+            self.has_pooling = True
+        except ImportError:
+            self.has_pooling = False
+
+        # Pooling can be disabled by passing pooling=False in the keywords.
+        self.has_pooling = self.keywords.pop('pooling', True) and self.has_pooling
+
+    def _getctx(self):
+        # Lazily (re)connect the current thread's context on first use.
+        if not self._ctx.get('db'):
+            self._load_context(self._ctx)
+        return self._ctx
+    ctx = property(_getctx)
+
+    def _load_context(self, ctx):
+        ctx.dbq_count = 0
+        ctx.transactions = [] # stack of transactions
+
+        if self.has_pooling:
+            ctx.db = self._connect_with_pooling(self.keywords)
+        else:
+            ctx.db = self._connect(self.keywords)
+        ctx.db_execute = self._db_execute
+
+        # Some drivers lack commit/rollback; provide no-op fallbacks.
+        if not hasattr(ctx.db, 'commit'):
+            ctx.db.commit = lambda: None
+
+        if not hasattr(ctx.db, 'rollback'):
+            ctx.db.rollback = lambda: None
+
+        def commit(unload=True):
+            # do db commit and release the connection if pooling is enabled.
+            ctx.db.commit()
+            if unload and self.has_pooling:
+                self._unload_context(self._ctx)
+
+        def rollback():
+            # do db rollback and release the connection if pooling is enabled.
+            ctx.db.rollback()
+            if self.has_pooling:
+                self._unload_context(self._ctx)
+
+        ctx.commit = commit
+        ctx.rollback = rollback
+
+    def _unload_context(self, ctx):
+        # Drop the connection; next ctx access reconnects via _load_context.
+        del ctx.db
+
+    def _connect(self, keywords):
+        return self.db_module.connect(**keywords)
+
+    def _connect_with_pooling(self, keywords):
+        def get_pooled_db():
+            from DBUtils import PooledDB
+
+            # In DBUtils 0.9.3, `dbapi` argument is renamed as `creator`
+            # see Bug#122112
+
+            if PooledDB.__version__.split('.') < '0.9.3'.split('.'):
+                return PooledDB.PooledDB(dbapi=self.db_module, **keywords)
+            else:
+                return PooledDB.PooledDB(creator=self.db_module, **keywords)
+
+        # The pool itself is created once and shared; only connections
+        # are checked out per call.
+        if getattr(self, '_pooleddb', None) is None:
+            self._pooleddb = get_pooled_db()
+
+        return self._pooleddb.connection()
+
+    def _db_cursor(self):
+        return self.ctx.db.cursor()
+
+    def _param_marker(self):
+        """Returns parameter marker based on paramstyle attribute of this database."""
+        style = getattr(self, 'paramstyle', 'pyformat')
+
+        if style == 'qmark':
+            return '?'
+        elif style == 'numeric':
+            return ':1'
+        elif style in ['format', 'pyformat']:
+            return '%s'
+        raise UnknownParamstyle, style
+
+    def _db_execute(self, cur, sql_query):
+        """executes an sql query"""
+        self.ctx.dbq_count += 1
+
+        try:
+            a = time.time()
+            query, params = self._process_query(sql_query)
+            out = cur.execute(query, params)
+            b = time.time()
+        except:
+            if self.printing:
+                print >> debug, 'ERR:', str(sql_query)
+            # On failure, roll back the innermost open transaction (or the
+            # whole connection if none), then re-raise.
+            if self.ctx.transactions:
+                self.ctx.transactions[-1].rollback()
+            else:
+                self.ctx.rollback()
+            raise
+
+        if self.printing:
+            # Log elapsed seconds, query count, and the query text.
+            print >> debug, '%s (%s): %s' % (round(b-a, 2), self.ctx.dbq_count, str(sql_query))
+        return out
+
+    def _process_query(self, sql_query):
+        """Takes the SQLQuery object and returns query string and parameters.
+        """
+        paramstyle = getattr(self, 'paramstyle', 'pyformat')
+        query = sql_query.query(paramstyle)
+        params = sql_query.values()
+        return query, params
+
+    def _where(self, where, vars):
+        # Normalize the many accepted `where` forms into a SQLQuery.
+        if isinstance(where, (int, long)):
+            where = "id = " + sqlparam(where)
+        #@@@ for backward-compatibility
+        elif isinstance(where, (list, tuple)) and len(where) == 2:
+            where = SQLQuery(where[0], where[1])
+        elif isinstance(where, dict):
+            where = self._where_dict(where)
+        elif isinstance(where, SQLQuery):
+            pass
+        else:
+            where = reparam(where, vars)
+        return where
+
+    def _where_dict(self, where):
+        # AND together equality (or IN, for list values) clauses.
+        where_clauses = []
+        for k, v in where.iteritems():
+            if isinstance(v, list):
+                where_clauses.append(k + ' IN ' + sqlquote(v))
+            else:
+                where_clauses.append(k + ' = ' + sqlquote(v))
+        if where_clauses:
+            return SQLQuery.join(where_clauses, " AND ")
+        else:
+            return None
+
+    def query(self, sql_query, vars=None, processed=False, _test=False):
+        """
+        Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
+        If `processed=True`, `vars` is a `reparam`-style list to use
+        instead of interpolating.
+
+        >>> db = DB(None, {})
+        >>> db.query("SELECT * FROM foo", _test=True)
+
+        >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
+
+        >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
+
+        """
+        if vars is None: vars = {}
+
+        if not processed and not isinstance(sql_query, SQLQuery):
+            sql_query = reparam(sql_query, vars)
+
+        if _test: return sql_query
+
+        db_cursor = self._db_cursor()
+        self._db_execute(db_cursor, sql_query)
+
+        if db_cursor.description:
+            # SELECT-like query: wrap rows as storage dicts in a lazy iterator.
+            names = [x[0] for x in db_cursor.description]
+            def iterwrapper():
+                row = db_cursor.fetchone()
+                while row:
+                    yield storage(dict(zip(names, row)))
+                    row = db_cursor.fetchone()
+            out = iterbetter(iterwrapper())
+            out.__len__ = lambda: int(db_cursor.rowcount)
+            out.list = lambda: [storage(dict(zip(names, x))) \
+                               for x in db_cursor.fetchall()]
+        else:
+            # Non-SELECT query: return the affected row count.
+            out = db_cursor.rowcount
+
+        # Auto-commit unless an explicit transaction is open.
+        if not self.ctx.transactions:
+            self.ctx.commit()
+        return out
+
+    def select(self, tables, vars=None, what='*', where=None, order=None, group=None,
+               limit=None, offset=None, _test=False):
+        """
+        Selects `what` from `tables` with clauses `where`, `order`,
+        `group`, `limit`, and `offset`. Uses vars to interpolate.
+        Otherwise, each clause can be a SQLQuery.
+
+        >>> db = DB(None, {})
+        >>> db.select('foo', _test=True)
+
+        >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
+
+        >>> db.select('foo', where={'id': 5}, _test=True)
+
+        """
+        if vars is None: vars = {}
+        sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
+        # Skip clauses whose value is None (e.g. no LIMIT given).
+        clauses = [self.gen_clause(sql, val, vars) for sql, val in sql_clauses if val is not None]
+        qout = SQLQuery.join(clauses)
+        if _test: return qout
+        return self.query(qout, processed=True)
+
+    def where(self, table, what='*', order=None, group=None, limit=None,
+              offset=None, _test=False, **kwargs):
+        """
+        Selects from `table` where keys are equal to values in `kwargs`.
+
+        >>> db = DB(None, {})
+        >>> db.where('foo', bar_id=3, _test=True)
+
+        >>> db.where('foo', source=2, crust='dewey', _test=True)
+
+        >>> db.where('foo', _test=True)
+
+        """
+        where = self._where_dict(kwargs)
+        return self.select(table, what=what, order=order,
+               group=group, limit=limit, offset=offset, _test=_test,
+               where=where)
+
+    def sql_clauses(self, what, tables, where, group, order, limit, offset):
+        # Clause ordering for standard SQL; subclasses override for dialects.
+        return (
+            ('SELECT', what),
+            ('FROM', sqllist(tables)),
+            ('WHERE', where),
+            ('GROUP BY', group),
+            ('ORDER BY', order),
+            ('LIMIT', limit),
+            ('OFFSET', offset))
+
+    def gen_clause(self, sql, val, vars):
+        # Render one clause ("KEYWORD value") as a SQLQuery.
+        if isinstance(val, (int, long)):
+            if sql == 'WHERE':
+                nout = 'id = ' + sqlquote(val)
+            else:
+                nout = SQLQuery(val)
+        #@@@
+        elif isinstance(val, (list, tuple)) and len(val) == 2:
+            nout = SQLQuery(val[0], val[1]) # backwards-compatibility
+        elif sql == 'WHERE' and isinstance(val, dict):
+            nout = self._where_dict(val)
+        elif isinstance(val, SQLQuery):
+            nout = val
+        else:
+            nout = reparam(val, vars)
+
+        def xjoin(a, b):
+            if a and b: return a + ' ' + b
+            else: return a or b
+
+        return xjoin(sql, nout)
+
+    def insert(self, tablename, seqname=None, _test=False, **values):
+        """
+        Inserts `values` into `tablename`. Returns current sequence ID.
+        Set `seqname` to the ID if it's not the default, or to `False`
+        if there isn't one.
+
+        >>> db = DB(None, {})
+        >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
+        >>> q
+
+        >>> q.query()
+        'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
+        >>> q.values()
+        [2, 'bob']
+        """
+        def q(x): return "(" + x + ")"
+
+        if values:
+            _keys = SQLQuery.join(values.keys(), ', ')
+            _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
+            sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
+        else:
+            sql_query = SQLQuery(self._get_insert_default_values_query(tablename))
+
+        if _test: return sql_query
+
+        db_cursor = self._db_cursor()
+        if seqname is not False:
+            sql_query = self._process_insert_query(sql_query, tablename, seqname)
+
+        if isinstance(sql_query, tuple):
+            # for some databases, a separate query has to be made to find
+            # the id of the inserted row.
+            q1, q2 = sql_query
+            self._db_execute(db_cursor, q1)
+            self._db_execute(db_cursor, q2)
+        else:
+            self._db_execute(db_cursor, sql_query)
+
+        try:
+            # Best-effort: the id-fetch query may not exist for all backends.
+            out = db_cursor.fetchone()[0]
+        except Exception:
+            out = None
+
+        if not self.ctx.transactions:
+            self.ctx.commit()
+        return out
+
+    def _get_insert_default_values_query(self, table):
+        return "INSERT INTO %s DEFAULT VALUES" % table
+
+    def multiple_insert(self, tablename, values, seqname=None, _test=False):
+        """
+        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
+        one for each row to be inserted, each with the same set of keys.
+        Returns the list of ids of the inserted rows.
+        Set `seqname` to the ID if it's not the default, or to `False`
+        if there isn't one.
+
+        >>> db = DB(None, {})
+        >>> db.supports_multiple_insert = True
+        >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
+        >>> db.multiple_insert('person', values=values, _test=True)
+
+        """
+        if not values:
+            return []
+
+        if not self.supports_multiple_insert:
+            # Fallback: one INSERT per row.
+            out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
+            if seqname is False:
+                return None
+            else:
+                return out
+
+        keys = values[0].keys()
+        #@@ make sure all keys are valid
+
+        for v in values:
+            if v.keys() != keys:
+                raise ValueError, 'Not all rows have the same keys'
+
+        sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))
+
+        for i, row in enumerate(values):
+            if i != 0:
+                sql_query.append(", ")
+            SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")
+
+        if _test: return sql_query
+
+        db_cursor = self._db_cursor()
+        if seqname is not False:
+            sql_query = self._process_insert_query(sql_query, tablename, seqname)
+
+        if isinstance(sql_query, tuple):
+            # for some databases, a separate query has to be made to find
+            # the id of the inserted row.
+            q1, q2 = sql_query
+            self._db_execute(db_cursor, q1)
+            self._db_execute(db_cursor, q2)
+        else:
+            self._db_execute(db_cursor, sql_query)
+
+        try:
+            # Reconstruct the id range from the last id and the row count;
+            # assumes ids were assigned contiguously — TODO confirm per backend.
+            out = db_cursor.fetchone()[0]
+            out = range(out-len(values)+1, out+1)
+        except Exception:
+            out = None
+
+        if not self.ctx.transactions:
+            self.ctx.commit()
+        return out
+
+
+    def update(self, tables, where, vars=None, _test=False, **values):
+        """
+        Update `tables` with clause `where` (interpolated using `vars`)
+        and setting `values`.
+
+        >>> db = DB(None, {})
+        >>> name = 'Joseph'
+        >>> q = db.update('foo', where='name = $name', name='bob', age=2,
+        ...     created=SQLLiteral('NOW()'), vars=locals(), _test=True)
+        >>> q
+
+        >>> q.query()
+        'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
+        >>> q.values()
+        [2, 'bob', 'Joseph']
+        """
+        if vars is None: vars = {}
+        where = self._where(where, vars)
+
+        query = (
+          "UPDATE " + sqllist(tables) +
+          " SET " + sqlwhere(values, ', ') +
+          " WHERE " + where)
+
+        if _test: return query
+
+        db_cursor = self._db_cursor()
+        self._db_execute(db_cursor, query)
+        if not self.ctx.transactions:
+            self.ctx.commit()
+        return db_cursor.rowcount
+
+    def delete(self, table, where, using=None, vars=None, _test=False):
+        """
+        Deletes from `table` with clauses `where` and `using`.
+
+        >>> db = DB(None, {})
+        >>> name = 'Joe'
+        >>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
+
+        """
+        if vars is None: vars = {}
+        where = self._where(where, vars)
+
+        q = 'DELETE FROM ' + table
+        if using: q += ' USING ' + sqllist(using)
+        if where: q += ' WHERE ' + where
+
+        if _test: return q
+
+        db_cursor = self._db_cursor()
+        self._db_execute(db_cursor, q)
+        if not self.ctx.transactions:
+            self.ctx.commit()
+        return db_cursor.rowcount
+
+    def _process_insert_query(self, query, tablename, seqname):
+        # Hook for subclasses to append/pair an id-retrieval query.
+        return query
+
+    def transaction(self):
+        """Start a transaction."""
+        return Transaction(self.ctx)
+
+class PostgresDB(DB):
+    """Postgres driver."""
+    def __init__(self, **keywords):
+        if 'pw' in keywords:
+            keywords['password'] = keywords.pop('pw')
+
+        db_module = import_driver(["psycopg2", "psycopg", "pgdb"], preferred=keywords.pop('driver', None))
+        if db_module.__name__ == "psycopg2":
+            import psycopg2.extensions
+            psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+        # pgdb takes the port as part of the host string, not as a keyword.
+        if db_module.__name__ == "pgdb" and 'port' in keywords:
+            keywords["host"] += ":" + str(keywords.pop('port'))
+
+        # if db is not provided postgres driver will take it from PGDATABASE environment variable
+        if 'db' in keywords:
+            keywords['database'] = keywords.pop('db')
+
+        self.dbname = "postgres"
+        self.paramstyle = db_module.paramstyle
+        DB.__init__(self, db_module, keywords)
+        self.supports_multiple_insert = True
+        self._sequences = None
+
+    def _process_insert_query(self, query, tablename, seqname):
+        if seqname is None:
+            # when seqname is not provided guess the seqname and make sure it exists
+            seqname = tablename + "_id_seq"
+            if seqname not in self._get_all_sequences():
+                seqname = None
+
+        if seqname:
+            query += "; SELECT currval('%s')" % seqname
+
+        return query
+
+    def _get_all_sequences(self):
+        """Query postgres to find names of all sequences used in this database."""
+        # Cached after the first call for the lifetime of this DB object.
+        if self._sequences is None:
+            q = "SELECT c.relname FROM pg_class c WHERE c.relkind = 'S'"
+            self._sequences = set([c.relname for c in self.query(q)])
+        return self._sequences
+
+    def _connect(self, keywords):
+        conn = DB._connect(self, keywords)
+        try:
+            conn.set_client_encoding('UTF8')
+        except AttributeError:
+            # fallback for pgdb driver
+            conn.cursor().execute("set client_encoding to 'UTF-8'")
+        return conn
+
+    def _connect_with_pooling(self, keywords):
+        # Reach through DBUtils' wrapper layers to the raw psycopg connection.
+        conn = DB._connect_with_pooling(self, keywords)
+        conn._con._con.set_client_encoding('UTF8')
+        return conn
+
+class MySQLDB(DB):
+    def __init__(self, **keywords):
+        import MySQLdb as db
+        if 'pw' in keywords:
+            keywords['passwd'] = keywords['pw']
+            del keywords['pw']
+
+        # Default the connection charset to utf8; pass charset=None to skip it.
+        if 'charset' not in keywords:
+            keywords['charset'] = 'utf8'
+        elif keywords['charset'] is None:
+            del keywords['charset']
+
+        self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
+        self.dbname = "mysql"
+        DB.__init__(self, db, keywords)
+        self.supports_multiple_insert = True
+
+    def _process_insert_query(self, query, tablename, seqname):
+        # MySQL needs a second query to retrieve the inserted id.
+        return query, SQLQuery('SELECT last_insert_id();')
+
+    def _get_insert_default_values_query(self, table):
+        # MySQL's spelling of an all-defaults insert.
+        return "INSERT INTO %s () VALUES()" % table
+
+def import_driver(drivers, preferred=None):
+    """Import the first available driver or preferred driver.
+    """
+    # NOTE: a preferred driver replaces the candidate list entirely — there
+    # is no fallback to the other drivers if it is missing.
+    if preferred:
+        drivers = [preferred]
+
+    for d in drivers:
+        try:
+            # fromlist=['x'] makes __import__ return the submodule itself
+            # for dotted names like 'pysqlite2.dbapi2'.
+            return __import__(d, None, None, ['x'])
+        except ImportError:
+            pass
+    raise ImportError("Unable to import " + " or ".join(drivers))
+
+class SqliteDB(DB):
+    def __init__(self, **keywords):
+        db = import_driver(["sqlite3", "pysqlite2.dbapi2", "sqlite"], preferred=keywords.pop('driver', None))
+
+        if db.__name__ in ["sqlite3", "pysqlite2.dbapi2"]:
+            db.paramstyle = 'qmark'
+
+        # sqlite driver doesn't create datetime objects for timestamp columns unless `detect_types` option is passed.
+        # It seems to be supported in sqlite3 and pysqlite2 drivers, not sure about sqlite.
+        keywords.setdefault('detect_types', db.PARSE_DECLTYPES)
+
+        self.paramstyle = db.paramstyle
+        keywords['database'] = keywords.pop('db')
+        keywords['pooling'] = False # sqlite doesn't allow connections to be shared by threads
+        self.dbname = "sqlite"
+        DB.__init__(self, db, keywords)
+
+    def _process_insert_query(self, query, tablename, seqname):
+        return query, SQLQuery('SELECT last_insert_rowid();')
+
+    def query(self, *a, **kw):
+        out = DB.query(self, *a, **kw)
+        # sqlite cursors report rowcount of -1 for SELECTs, so the __len__
+        # added by DB.query would be wrong; remove it.
+        if isinstance(out, iterbetter):
+            del out.__len__
+        return out
+
+class FirebirdDB(DB):
+    """Firebird Database.
+    """
+    def __init__(self, **keywords):
+        try:
+            import kinterbasdb as db
+        except Exception:
+            # NOTE(review): db stays None here, so db.paramstyle below will
+            # raise AttributeError instead of a clear ImportError — confirm
+            # whether this fallback is intentional.
+            db = None
+            pass
+        if 'pw' in keywords:
+            keywords['password'] = keywords.pop('pw')
+        keywords['database'] = keywords.pop('db')
+
+        self.paramstyle = db.paramstyle
+
+        DB.__init__(self, db, keywords)
+
+    def delete(self, table, where=None, using=None, vars=None, _test=False):
+        # firebird doesn't support using clause
+        using=None
+        return DB.delete(self, table, where, using, vars, _test)
+
+    def sql_clauses(self, what, tables, where, group, order, limit, offset):
+        # Firebird uses FIRST/SKIP instead of LIMIT/OFFSET, placed right
+        # after SELECT.
+        return (
+            ('SELECT', ''),
+            ('FIRST', limit),
+            ('SKIP', offset),
+            ('', what),
+            ('FROM', sqllist(tables)),
+            ('WHERE', where),
+            ('GROUP BY', group),
+            ('ORDER BY', order)
+        )
+
+class MSSQLDB(DB):
+    def __init__(self, **keywords):
+        import pymssql as db
+        if 'pw' in keywords:
+            keywords['password'] = keywords.pop('pw')
+        keywords['database'] = keywords.pop('db')
+        self.dbname = "mssql"
+        DB.__init__(self, db, keywords)
+
+    def _process_query(self, sql_query):
+        """Takes the SQLQuery object and returns query string and parameters.
+        """
+        # MSSQLDB expects params to be a tuple.
+        # Overwriting the default implementation to convert params to tuple.
+        paramstyle = getattr(self, 'paramstyle', 'pyformat')
+        query = sql_query.query(paramstyle)
+        params = sql_query.values()
+        return query, tuple(params)
+
+    def sql_clauses(self, what, tables, where, group, order, limit, offset):
+        # MSSQL uses TOP after SELECT instead of a trailing LIMIT.
+        return (
+            ('SELECT', what),
+            ('TOP', limit),
+            ('FROM', sqllist(tables)),
+            ('WHERE', where),
+            ('GROUP BY', group),
+            ('ORDER BY', order),
+            ('OFFSET', offset))
+
+    def _test(self):
+        """Test LIMIT.
+
+        Fake presence of pymssql module for running tests.
+        >>> import sys
+        >>> sys.modules['pymssql'] = sys.modules['sys']
+
+        MSSQL has TOP clause instead of LIMIT clause.
+        >>> db = MSSQLDB(db='test', user='joe', pw='secret')
+        >>> db.select('foo', limit=4, _test=True)
+
+        """
+        pass
+
+class OracleDB(DB):
+    """Oracle database, via the `cx_Oracle` driver.
+
+    Keyword `pw` is mapped to `password`; `db` is passed as the `dsn`.
+    """
+    def __init__(self, **keywords):
+        import cx_Oracle as db
+        if 'pw' in keywords:
+            keywords['password'] = keywords.pop('pw')
+
+        #@@ TODO: use db.makedsn if host, port is specified
+        keywords['dsn'] = keywords.pop('db')
+        self.dbname = 'oracle'
+        # force a single paramstyle so query rendering matches the driver
+        db.paramstyle = 'numeric'
+        self.paramstyle = db.paramstyle
+
+        # oracle doesn't support pooling
+        keywords.pop('pooling', None)
+        DB.__init__(self, db, keywords)
+
+    def _process_insert_query(self, query, tablename, seqname):
+        if seqname is None:
+            # It is not possible to get seq name from table name in Oracle
+            return query
+        else:
+            # read the freshly-incremented sequence value after the insert
+            return query + "; SELECT %s.currval FROM dual" % seqname
+
+def dburl2dict(url):
+    """
+    Takes a URL to a database and parses it into an equivalent dictionary.
+
+    >>> dburl2dict('postgres:///mygreatdb')
+    {'pw': None, 'dbn': 'postgres', 'db': 'mygreatdb', 'host': None, 'user': None, 'port': None}
+    >>> dburl2dict('postgres://james:day@serverfarm.example.net:5432/mygreatdb')
+    {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': 5432}
+    >>> dburl2dict('postgres://james:day@serverfarm.example.net/mygreatdb')
+    {'pw': 'day', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
+    >>> dburl2dict('postgres://james:d%40y@serverfarm.example.net/mygreatdb')
+    {'pw': 'd@y', 'dbn': 'postgres', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
+    >>> dburl2dict('mysql://james:d%40y@serverfarm.example.net/mygreatdb')
+    {'pw': 'd@y', 'dbn': 'mysql', 'db': 'mygreatdb', 'host': 'serverfarm.example.net', 'user': 'james', 'port': None}
+    """
+    # NOTE(review): the whole URL is unquoted before parsing, so %-escapes
+    # anywhere in it are decoded first — a password containing a literal
+    # ':' or '@' would confuse urlparse.
+    parts = urlparse.urlparse(urllib.unquote(url))
+
+    # path is '/dbname'; strip the leading slash
+    return {'dbn': parts.scheme,
+            'user': parts.username,
+            'pw': parts.password,
+            'db': parts.path[1:],
+            'host': parts.hostname,
+            'port': parts.port}
+
+_databases = {}  # registry: dbn name -> DB subclass (see register_database)
+def database(dburl=None, **params):
+    """Creates appropriate database using params.
+
+    Pooling will be enabled if DBUtils module is available.
+    Pooling can be disabled by passing pooling=False in params.
+    """
+    if not dburl and not params:
+        # fall back to the conventional environment variable
+        dburl = os.environ['DATABASE_URL']
+    if dburl:
+        # a URL takes precedence: it replaces any keyword params entirely
+        params = dburl2dict(dburl)
+    dbn = params.pop('dbn')
+    if dbn in _databases:
+        return _databases[dbn](**params)
+    else:
+        raise UnknownDB, dbn
+
+def register_database(name, clazz):
+    """
+    Register a database.
+
+    >>> class LegacyDB(DB):
+    ...     def __init__(self, **params):
+    ...        pass
+    ...
+    >>> register_database('legacy', LegacyDB)
+    >>> db = database(dbn='legacy', db='test', user='joe', passwd='secret')
+    """
+    _databases[name] = clazz
+
+# Built-in drivers. Each class imports its driver module lazily in
+# __init__, so registering here pulls in no third-party dependencies.
+register_database('mysql', MySQLDB)
+register_database('postgres', PostgresDB)
+register_database('sqlite', SqliteDB)
+register_database('firebird', FirebirdDB)
+register_database('mssql', MSSQLDB)
+register_database('oracle', OracleDB)
+
+def _interpolate(format):
+    """
+    Takes a format string and returns a list of 2-tuples of the form
+    (boolean, string) where boolean says whether string should be evaled
+    or not.
+
+    from (public domain, Ka-Ping Yee)
+    """
+    from tokenize import tokenprog
+
+    def matchorfail(text, pos):
+        # match one Python token at pos, or bail out with a parse error
+        match = tokenprog.match(text, pos)
+        if match is None:
+            raise _ItplError(text, pos)
+        return match, match.end()
+
+    namechars = "abcdefghijklmnopqrstuvwxyz" \
+        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
+    chunks = []
+    pos = 0
+
+    while 1:
+        dollar = format.find("$", pos)
+        if dollar < 0:
+            break
+        # NOTE(review): a '$' as the very last character raises IndexError
+        # here (no bounds check before format[dollar + 1]).
+        nextchar = format[dollar + 1]
+
+        if nextchar == "{":
+            # ${expr}: scan Python tokens until braces balance
+            chunks.append((0, format[pos:dollar]))
+            pos, level = dollar + 2, 1
+            while level:
+                match, pos = matchorfail(format, pos)
+                tstart, tend = match.regs[3]
+                token = format[tstart:tend]
+                if token == "{":
+                    level = level + 1
+                elif token == "}":
+                    level = level - 1
+            chunks.append((1, format[dollar + 2:pos - 1]))
+
+        elif nextchar in namechars:
+            # $name, optionally followed by .attr chains and ()/[] groups
+            chunks.append((0, format[pos:dollar]))
+            match, pos = matchorfail(format, dollar + 1)
+            while pos < len(format):
+                if format[pos] == "." and \
+                    pos + 1 < len(format) and format[pos + 1] in namechars:
+                    match, pos = matchorfail(format, pos + 1)
+                elif format[pos] in "([":
+                    # consume a balanced (...) or [...] group token by token
+                    pos, level = pos + 1, 1
+                    while level:
+                        match, pos = matchorfail(format, pos)
+                        tstart, tend = match.regs[3]
+                        token = format[tstart:tend]
+                        if token[0] in "([":
+                            level = level + 1
+                        elif token[0] in ")]":
+                            level = level - 1
+                else:
+                    break
+            chunks.append((1, format[dollar + 1:pos]))
+        else:
+            # lone '$' (or '$$' escape): emit literally; skip both chars on '$$'
+            chunks.append((0, format[pos:dollar + 1]))
+            pos = dollar + 1 + (nextchar == "$")
+
+    if pos < len(format):
+        chunks.append((0, format[pos:]))
+    return chunks
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/debugerror.py b/lib/nulib/python/nulib/ext/web/debugerror.py
new file mode 100644
index 0000000..656d812
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/debugerror.py
@@ -0,0 +1,354 @@
+"""
+pretty debug errors
+(part of web.py)
+
+portions adapted from Django
+Copyright (c) 2005, the Lawrence Journal-World
+Used under the modified BSD license:
+http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
+"""
+
+__all__ = ["debugerror", "djangoerror", "emailerrors"]
+
+import sys, urlparse, pprint, traceback
+from template import Template
+from net import websafe
+from utils import sendmail, safestr
+import webapi as web
+
+import os, os.path
+whereami = os.path.join(os.getcwd(), __file__)
+whereami = os.path.sep.join(whereami.split(os.path.sep)[:-1])
+djangoerror_t = """\
+$def with (exception_type, exception_value, frames)
+
+
+
+
+
+ $exception_type at $ctx.path
+
+
+
+
+
+$def dicttable (d, kls='req', id=None):
+ $ items = d and d.items() or []
+ $items.sort()
+ $:dicttable_items(items, kls, id)
+
+$def dicttable_items(items, kls='req', id=None):
+ $if items:
+
Variable
Value
+
+ $for k, v in items:
+
$k
$prettify(v)
+
+
+ $else:
+
No data.
+
+
+
$exception_type at $ctx.path
+
$exception_value
+
+
Python
+
$frames[0].filename in $frames[0].function, line $frames[0].lineno
+
+
Web
+
$ctx.method $ctx.home$ctx.path
+
+
+
+
Traceback (innermost first)
+
+$for frame in frames:
+
+ $frame.filename in $frame.function
+ $if frame.context_line is not None:
+
+ $if frame.pre_context:
+
+ $for line in frame.pre_context:
+
$line
+
+
$frame.context_line ...
+ $if frame.post_context:
+
+ $for line in frame.post_context:
+
$line
+
+
+
+ $if frame.vars:
+
+ ▶ Local vars
+ $# $inspect.formatargvalues(*inspect.getargvalues(frame['tb'].tb_frame))
+
+$ newctx = [(k, v) for (k, v) in ctx.iteritems() if not k.startswith('_') and not isinstance(v, dict)]
+$:dicttable(dict(newctx))
+
+
ENVIRONMENT
+$:dicttable(ctx.env)
+
+
+
+
+ You're seeing this error because you have web.config.debug
+ set to True. Set that to False if you don't want to see this.
+
+
+
+
+
+"""
+
+djangoerror_r = None
+
+def djangoerror():
+    # Render the current exception (sys.exc_info()) as a Django-style HTML
+    # error page using the djangoerror_t template.
+    def _get_lines_from_file(filename, lineno, context_lines):
+        """
+        Returns context_lines before and after lineno from file.
+        Returns (pre_context_lineno, pre_context, context_line, post_context).
+        """
+        try:
+            source = open(filename).readlines()
+            lower_bound = max(0, lineno - context_lines)
+            upper_bound = lineno + context_lines
+
+            pre_context = \
+                [line.strip('\n') for line in source[lower_bound:lineno]]
+            context_line = source[lineno].strip('\n')
+            post_context = \
+                [line.strip('\n') for line in source[lineno + 1:upper_bound]]
+
+            return lower_bound, pre_context, context_line, post_context
+        except (OSError, IOError, IndexError):
+            # unreadable or too-short source file: show no context
+            return None, [], None, []
+
+    exception_type, exception_value, tback = sys.exc_info()
+    frames = []
+    # walk the traceback chain, collecting one storage record per frame
+    while tback is not None:
+        filename = tback.tb_frame.f_code.co_filename
+        function = tback.tb_frame.f_code.co_name
+        lineno = tback.tb_lineno - 1
+
+        # hack to get correct line number for templates
+        lineno += tback.tb_frame.f_locals.get("__lineoffset__", 0)
+
+        pre_context_lineno, pre_context, context_line, post_context = \
+            _get_lines_from_file(filename, lineno, 7)
+
+        # frames can opt out of display via a __hidetraceback__ local
+        if '__hidetraceback__' not in tback.tb_frame.f_locals:
+            frames.append(web.storage({
+                'tback': tback,
+                'filename': filename,
+                'function': function,
+                'lineno': lineno,
+                'vars': tback.tb_frame.f_locals,
+                'id': id(tback),
+                'pre_context': pre_context,
+                'context_line': context_line,
+                'post_context': post_context,
+                'pre_context_lineno': pre_context_lineno,
+            }))
+        tback = tback.tb_next
+    # innermost frame first, as the template expects
+    frames.reverse()
+    urljoin = urlparse.urljoin
+    def prettify(x):
+        # pprint the value; never let a repr failure break the error page
+        try:
+            out = pprint.pformat(x)
+        except Exception, e:
+            out = '[could not display: <' + e.__class__.__name__ + \
+                  ': '+str(e)+'>]'
+        return out
+
+    # compile the template once and cache it module-wide
+    global djangoerror_r
+    if djangoerror_r is None:
+        djangoerror_r = Template(djangoerror_t, filename=__file__, filter=websafe)
+
+    t = djangoerror_r
+    # inject the names the template body references
+    globals = {'ctx': web.ctx, 'web':web, 'dict':dict, 'str':str, 'prettify': prettify}
+    t.t.func_globals.update(globals)
+    return t(exception_type, exception_value, frames)
+
+def debugerror():
+    """
+    A replacement for `internalerror` that presents a nice page with lots
+    of debug information for the programmer.
+
+    (Based on the beautiful 500 page from [Django](http://djangoproject.com/),
+    designed by [Wilson Miner](http://wilsonminer.com/).)
+    """
+    # djangoerror() renders the pending exception as HTML; wrap it in the
+    # framework's 500 response object.
+    return web._InternalError(djangoerror())
+
+def emailerrors(to_address, olderror, from_address=None):
+    """
+    Wraps the old `internalerror` handler (pass as `olderror`) to
+    additionally email all errors to `to_address`, to aid in
+    debugging production websites.
+
+    Emails contain a normal text traceback as well as an
+    attachment containing the nice `debugerror` page.
+    """
+    from_address = from_address or to_address
+
+    def emailerrors_internal():
+        # run the wrapped handler first so its response is what we return
+        error = olderror()
+        tb = sys.exc_info()
+        error_name = tb[0]
+        error_value = tb[1]
+        tb_txt = ''.join(traceback.format_exception(*tb))
+        path = web.ctx.path
+        request = web.ctx.method + ' ' + web.ctx.home + web.ctx.fullpath
+
+        message = "\n%s\n\n%s\n\n" % (request, tb_txt)
+
+        # plain-text traceback in the body, full debugerror page attached
+        sendmail(
+            "your buggy site <%s>" % from_address,
+            "the bugfixer <%s>" % to_address,
+            "bug: %(error_name)s: %(error_value)s (%(path)s)" % locals(),
+            message,
+            attachments=[
+                dict(filename="bug.html", content=safestr(djangoerror()))
+            ],
+        )
+        return error
+
+    return emailerrors_internal
+
+if __name__ == "__main__":
+ urls = (
+ '/', 'index'
+ )
+ from application import application
+ app = application(urls, globals())
+ app.internalerror = debugerror
+
+ class index:
+ def GET(self):
+ thisdoesnotexist
+
+ app.run()
diff --git a/lib/nulib/python/nulib/ext/web/form.py b/lib/nulib/python/nulib/ext/web/form.py
new file mode 100644
index 0000000..f2f836c
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/form.py
@@ -0,0 +1,416 @@
+"""
+HTML forms
+(part of web.py)
+"""
+
+import copy, re
+import webapi as web
+import utils, net
+
+def attrget(obj, attr, value=None):
+    """Return obj[attr] for mappings, else obj.attr, else `value`."""
+    try:
+        if hasattr(obj, 'has_key') and obj.has_key(attr):
+            return obj[attr]
+    except TypeError:
+        # Handle the case where has_key takes different number of arguments.
+        # This is the case with Model objects on appengine. See #134
+        pass
+    if hasattr(obj, attr):
+        return getattr(obj, attr)
+    return value
+
+class Form(object):
+ r"""
+ HTML form.
+
+ >>> f = Form(Textbox("x"))
+ >>> f.render()
+ u'
\n
\n
'
+ """
+ def __init__(self, *inputs, **kw):
+ self.inputs = inputs
+ self.valid = True
+ self.note = None
+ self.validators = kw.pop('validators', [])
+
+ def __call__(self, x=None):
+ o = copy.deepcopy(self)
+ if x: o.validates(x)
+ return o
+
+ def render(self):
+ out = ''
+ out += self.rendernote(self.note)
+ out += '
\n'
+
+ for i in self.inputs:
+ html = utils.safeunicode(i.pre) + i.render() + self.rendernote(i.note) + utils.safeunicode(i.post)
+ if i.is_hidden():
+ out += '
%s
\n' % (html)
+ else:
+ out += '
%s
\n' % (i.id, net.websafe(i.description), html)
+ out += "
"
+ return out
+
+ def render_css(self):
+ out = []
+ out.append(self.rendernote(self.note))
+ for i in self.inputs:
+ if not i.is_hidden():
+ out.append('' % (i.id, net.websafe(i.description)))
+ out.append(i.pre)
+ out.append(i.render())
+ out.append(self.rendernote(i.note))
+ out.append(i.post)
+ out.append('\n')
+ return ''.join(out)
+
+ def rendernote(self, note):
+ if note: return '%s' % net.websafe(note)
+ else: return ""
+
+ def validates(self, source=None, _validate=True, **kw):
+ source = source or kw or web.input()
+ out = True
+ for i in self.inputs:
+ v = attrget(source, i.name)
+ if _validate:
+ out = i.validate(v) and out
+ else:
+ i.set_value(v)
+ if _validate:
+ out = out and self._validate(source)
+ self.valid = out
+ return out
+
+ def _validate(self, value):
+ self.value = value
+ for v in self.validators:
+ if not v.valid(value):
+ self.note = v.msg
+ return False
+ return True
+
+ def fill(self, source=None, **kw):
+ return self.validates(source, _validate=False, **kw)
+
+ def __getitem__(self, i):
+ for x in self.inputs:
+ if x.name == i: return x
+ raise KeyError, i
+
+ def __getattr__(self, name):
+ # don't interfere with deepcopy
+ inputs = self.__dict__.get('inputs') or []
+ for x in inputs:
+ if x.name == name: return x
+ raise AttributeError, name
+
+ def get(self, i, default=None):
+ try:
+ return self[i]
+ except KeyError:
+ return default
+
+ def _get_d(self): #@@ should really be form.attr, no?
+ return utils.storage([(i.name, i.get_value()) for i in self.inputs])
+ d = property(_get_d)
+
+class Input(object):
+ def __init__(self, name, *validators, **attrs):
+ self.name = name
+ self.validators = validators
+ self.attrs = attrs = AttributeList(attrs)
+
+ self.description = attrs.pop('description', name)
+ self.value = attrs.pop('value', None)
+ self.pre = attrs.pop('pre', "")
+ self.post = attrs.pop('post', "")
+ self.note = None
+
+ self.id = attrs.setdefault('id', self.get_default_id())
+
+ if 'class_' in attrs:
+ attrs['class'] = attrs['class_']
+ del attrs['class_']
+
+ def is_hidden(self):
+ return False
+
+ def get_type(self):
+ raise NotImplementedError
+
+ def get_default_id(self):
+ return self.name
+
+ def validate(self, value):
+ self.set_value(value)
+
+ for v in self.validators:
+ if not v.valid(value):
+ self.note = v.msg
+ return False
+ return True
+
+ def set_value(self, value):
+ self.value = value
+
+ def get_value(self):
+ return self.value
+
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['type'] = self.get_type()
+ if self.value is not None:
+ attrs['value'] = self.value
+ attrs['name'] = self.name
+ return '' % attrs
+
+ def rendernote(self, note):
+ if note: return '%s' % net.websafe(note)
+ else: return ""
+
+ def addatts(self):
+ # add leading space for backward-compatibility
+ return " " + str(self.attrs)
+
+class AttributeList(dict):
+ """List of atributes of input.
+
+ >>> a = AttributeList(type='text', name='x', value=20)
+ >>> a
+
+ """
+ def copy(self):
+ return AttributeList(self)
+
+ def __str__(self):
+ return " ".join(['%s="%s"' % (k, net.websafe(v)) for k, v in self.items()])
+
+ def __repr__(self):
+ return '' % repr(str(self))
+
+class Textbox(Input):
+ """Textbox input.
+
+ >>> Textbox(name='foo', value='bar').render()
+ u''
+ >>> Textbox(name='foo', value=0).render()
+ u''
+ """
+ def get_type(self):
+ return 'text'
+
+class Password(Input):
+ """Password input.
+
+ >>> Password(name='password', value='secret').render()
+ u''
+ """
+
+ def get_type(self):
+ return 'password'
+
+class Textarea(Input):
+ """Textarea input.
+
+ >>> Textarea(name='foo', value='bar').render()
+ u''
+ """
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['name'] = self.name
+ value = net.websafe(self.value or '')
+ return '' % (attrs, value)
+
+class Dropdown(Input):
+ r"""Dropdown/select input.
+
+ >>> Dropdown(name='foo', args=['a', 'b', 'c'], value='b').render()
+ u'\n'
+ >>> Dropdown(name='foo', args=[('a', 'aa'), ('b', 'bb'), ('c', 'cc')], value='b').render()
+ u'\n'
+ """
+ def __init__(self, name, args, *validators, **attrs):
+ self.args = args
+ super(Dropdown, self).__init__(name, *validators, **attrs)
+
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['name'] = self.name
+
+ x = '\n'
+ return x
+
+ def _render_option(self, arg, indent=' '):
+ if isinstance(arg, (tuple, list)):
+ value, desc= arg
+ else:
+ value, desc = arg, arg
+
+ value = utils.safestr(value)
+ if isinstance(self.value, (tuple, list)):
+ s_value = [utils.safestr(x) for x in self.value]
+ else:
+ s_value = utils.safestr(self.value)
+
+ if s_value == value or (isinstance(s_value, list) and value in s_value):
+ select_p = ' selected="selected"'
+ else:
+ select_p = ''
+ return indent + '\n' % (select_p, net.websafe(value), net.websafe(desc))
+
+
+class GroupedDropdown(Dropdown):
+ r"""Grouped Dropdown/select input.
+
+ >>> GroupedDropdown(name='car_type', args=(('Swedish Cars', ('Volvo', 'Saab')), ('German Cars', ('Mercedes', 'Audi'))), value='Audi').render()
+ u'\n'
+ >>> GroupedDropdown(name='car_type', args=(('Swedish Cars', (('v', 'Volvo'), ('s', 'Saab'))), ('German Cars', (('m', 'Mercedes'), ('a', 'Audi')))), value='a').render()
+ u'\n'
+
+ """
+ def __init__(self, name, args, *validators, **attrs):
+ self.args = args
+ super(Dropdown, self).__init__(name, *validators, **attrs)
+
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['name'] = self.name
+
+ x = '\n'
+ return x
+
+class Radio(Input):
+ def __init__(self, name, args, *validators, **attrs):
+ self.args = args
+ super(Radio, self).__init__(name, *validators, **attrs)
+
+ def render(self):
+ x = ''
+ for arg in self.args:
+ if isinstance(arg, (tuple, list)):
+ value, desc= arg
+ else:
+ value, desc = arg, arg
+ attrs = self.attrs.copy()
+ attrs['name'] = self.name
+ attrs['type'] = 'radio'
+ attrs['value'] = value
+ if self.value == value:
+ attrs['checked'] = 'checked'
+ x += ' %s' % (attrs, net.websafe(desc))
+ x += ''
+ return x
+
+class Checkbox(Input):
+ """Checkbox input.
+
+ >>> Checkbox('foo', value='bar', checked=True).render()
+ u''
+ >>> Checkbox('foo', value='bar').render()
+ u''
+ >>> c = Checkbox('foo', value='bar')
+ >>> c.validate('on')
+ True
+ >>> c.render()
+ u''
+ """
+ def __init__(self, name, *validators, **attrs):
+ self.checked = attrs.pop('checked', False)
+ Input.__init__(self, name, *validators, **attrs)
+
+ def get_default_id(self):
+ value = utils.safestr(self.value or "")
+ return self.name + '_' + value.replace(' ', '_')
+
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['type'] = 'checkbox'
+ attrs['name'] = self.name
+ attrs['value'] = self.value
+
+ if self.checked:
+ attrs['checked'] = 'checked'
+ return '' % attrs
+
+ def set_value(self, value):
+ self.checked = bool(value)
+
+ def get_value(self):
+ return self.checked
+
+class Button(Input):
+ """HTML Button.
+
+ >>> Button("save").render()
+ u''
+ >>> Button("action", value="save", html="Save Changes").render()
+ u''
+ """
+ def __init__(self, name, *validators, **attrs):
+ super(Button, self).__init__(name, *validators, **attrs)
+ self.description = ""
+
+ def render(self):
+ attrs = self.attrs.copy()
+ attrs['name'] = self.name
+ if self.value is not None:
+ attrs['value'] = self.value
+ html = attrs.pop('html', None) or net.websafe(self.name)
+ return '' % (attrs, html)
+
+class Hidden(Input):
+ """Hidden Input.
+
+ >>> Hidden(name='foo', value='bar').render()
+ u''
+ """
+ def is_hidden(self):
+ return True
+
+ def get_type(self):
+ return 'hidden'
+
+class File(Input):
+ """File input.
+
+ >>> File(name='f').render()
+ u''
+ """
+ def get_type(self):
+ return 'file'
+
+class Validator:
+    # shallow-copy on deepcopy so the test callable isn't deep-copied
+    def __deepcopy__(self, memo): return copy.copy(self)
+    # autoassign stores msg/test/jstest as attributes of the same name
+    def __init__(self, msg, test, jstest=None): utils.autoassign(self, locals())
+    def valid(self, value):
+        # any exception raised by the test counts as "invalid"
+        try: return self.test(value)
+        except: return False
+
+# stock validator: value must be truthy (non-empty)
+notnull = Validator("Required", bool)
+
+class regexp(Validator):
+    """Validator that accepts values matching the given regular expression."""
+    def __init__(self, rexp, msg):
+        self.rexp = re.compile(rexp)
+        self.msg = msg
+
+    def valid(self, value):
+        # match() anchors at the start only, not the end
+        return bool(self.rexp.match(value))
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/http.py b/lib/nulib/python/nulib/ext/web/http.py
new file mode 100644
index 0000000..da67eba
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/http.py
@@ -0,0 +1,150 @@
+"""
+HTTP Utilities
+(from web.py)
+"""
+
+__all__ = [
+ "expires", "lastmodified",
+ "prefixurl", "modified",
+ "changequery", "url",
+ "profiler",
+]
+
+import sys, os, threading, urllib, urlparse
+try: import datetime
+except ImportError: pass
+import net, utils, webapi as web
+
+def prefixurl(base=''):
+    """
+    Sorry, this function is really difficult to explain.
+    Maybe some other time.
+    """
+    # Build a relative prefix from the current request path back to the
+    # root: one '../' per path segment below the first.
+    url = web.ctx.path.lstrip('/')
+    for i in xrange(url.count('/')):
+        base += '../'
+    if not base:
+        base = './'
+    return base
+
<br/>
+def expires(delta):
+    """
+    Outputs an `Expires` header for `delta` from now.
+    `delta` is a `timedelta` object or a number of seconds.
+    """
+    # normalize plain seconds to a timedelta
+    if isinstance(delta, (int, long)):
+        delta = datetime.timedelta(seconds=delta)
+    date_obj = datetime.datetime.utcnow() + delta
+    web.header('Expires', net.httpdate(date_obj))
+
+def lastmodified(date_obj):
+    """Outputs a `Last-Modified` header for `datetime`."""
+    web.header('Last-Modified', net.httpdate(date_obj))
+
+def modified(date=None, etag=None):
+    """
+    Checks to see if the page has been modified since the version in the
+    requester's cache.
+
+    When you publish pages, you can include `Last-Modified` and `ETag`
+    with the date the page was last modified and an opaque token for
+    the particular version, respectively. When readers reload the page,
+    the browser sends along the modification date and etag value for
+    the version it has in its cache. If the page hasn't changed,
+    the server can just return `304 Not Modified` and not have to
+    send the whole page again.
+
+    This function takes the last-modified date `date` and the ETag `etag`
+    and checks the headers to see if they match. If they do, it returns
+    `True`, or otherwise it raises NotModified error. It also sets
+    `Last-Modified` and `ETag` output headers.
+    """
+    try:
+        from __builtin__ import set
+    except ImportError:
+        # for python 2.3
+        from sets import Set as set
+
+    # etags the client already holds (If-None-Match is comma-separated)
+    n = set([x.strip('" ') for x in web.ctx.env.get('HTTP_IF_NONE_MATCH', '').split(',')])
+    # client's cached date, ignoring any ';length=...' suffix
+    m = net.parsehttpdate(web.ctx.env.get('HTTP_IF_MODIFIED_SINCE', '').split(';')[0])
+    validate = False
+    if etag:
+        if '*' in n or etag in n:
+            validate = True
+    if date and m:
+        # we subtract a second because
+        # HTTP dates don't have sub-second precision
+        if date-datetime.timedelta(seconds=1) <= m:
+            validate = True
+
+    if date: lastmodified(date)
+    if etag: web.header('ETag', '"' + etag + '"')
+    if validate:
+        raise web.notmodified()
+    else:
+        return True
+
+def urlencode(query, doseq=0):
+    """
+    Same as urllib.urlencode, but supports unicode strings.
+
+    >>> urlencode({'text':'foo bar'})
+    'text=foo+bar'
+    >>> urlencode({'x': [1, 2]}, doseq=True)
+    'x=1&x=2'
+    """
+    def convert(value, doseq=False):
+        # with doseq, lists become one key=value pair per element
+        if doseq and isinstance(value, list):
+            return [convert(v) for v in value]
+        else:
+            return utils.safestr(value)
+
+    # byte-encode every value before handing off to urllib
+    query = dict([(k, convert(v, doseq)) for k, v in query.items()])
+    return urllib.urlencode(query, doseq=doseq)
+
+def changequery(query=None, **kw):
+    """
+    Imagine you're at `/foo?a=1&b=2`. Then `changequery(a=3)` will return
+    `/foo?a=3&b=2` -- the same URL but with the arguments you requested
+    changed.
+    """
+    if query is None:
+        # start from the current GET parameters
+        query = web.rawinput(method='get')
+    for k, v in kw.iteritems():
+        # a None value removes the parameter entirely
+        if v is None:
+            query.pop(k, None)
+        else:
+            query[k] = v
+    out = web.ctx.path
+    if query:
+        out += '?' + urlencode(query, doseq=True)
+    return out
+
+def url(path=None, doseq=False, **kw):
+    """
+    Makes url by concatenating web.ctx.homepath and path and the
+    query string created using the arguments.
+    """
+    if path is None:
+        path = web.ctx.path
+    # absolute paths are rooted at the application's home path;
+    # relative paths are left untouched
+    if path.startswith("/"):
+        out = web.ctx.homepath + path
+    else:
+        out = path
+
+    if kw:
+        out += '?' + urlencode(kw, doseq=doseq)
+
+    return out
+
+def profiler(app):
+ """Outputs basic profiling information at the bottom of each response."""
+ from utils import profile
+ def profile_internal(e, o):
+ out, result = profile(app)(e, o)
+ return list(out) + ['
' + net.websafe(result) + '
']
+ return profile_internal
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/httpserver.py b/lib/nulib/python/nulib/ext/web/httpserver.py
new file mode 100644
index 0000000..24aad6b
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/httpserver.py
@@ -0,0 +1,334 @@
+__all__ = ["runsimple"]
+
+import sys, os
+from os import path
+import urlparse, posixpath, urllib
+from SimpleHTTPServer import SimpleHTTPRequestHandler
+import urllib
+import posixpath
+
+import webapi as web
+import net
+import utils
+
+def runbasic(func, server_address=("0.0.0.0", 8080)):
+ """
+ Runs a simple HTTP server hosting WSGI app `func`. The directory `static/`
+ is hosted statically.
+
+ Based on [WsgiServer][ws] from [Colin Stewart][cs].
+
+ [ws]: http://www.owlfish.com/software/wsgiutils/documentation/wsgi-server-api.html
+ [cs]: http://www.owlfish.com/
+ """
+ # Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
+ # Modified somewhat for simplicity
+ # Used under the modified BSD license:
+ # http://www.xfree86.org/3.3.6/COPYRIGHT2.html#5
+
+ import SimpleHTTPServer, SocketServer, BaseHTTPServer, urlparse
+ import socket, errno
+ import traceback
+
+ class WSGIHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
+ def run_wsgi_app(self):
+ protocol, host, path, parameters, query, fragment = \
+ urlparse.urlparse('http://dummyhost%s' % self.path)
+
+ # we only use path, query
+ env = {'wsgi.version': (1, 0)
+ ,'wsgi.url_scheme': 'http'
+ ,'wsgi.input': self.rfile
+ ,'wsgi.errors': sys.stderr
+ ,'wsgi.multithread': 1
+ ,'wsgi.multiprocess': 0
+ ,'wsgi.run_once': 0
+ ,'REQUEST_METHOD': self.command
+ ,'REQUEST_URI': self.path
+ ,'PATH_INFO': path
+ ,'QUERY_STRING': query
+ ,'CONTENT_TYPE': self.headers.get('Content-Type', '')
+ ,'CONTENT_LENGTH': self.headers.get('Content-Length', '')
+ ,'REMOTE_ADDR': self.client_address[0]
+ ,'SERVER_NAME': self.server.server_address[0]
+ ,'SERVER_PORT': str(self.server.server_address[1])
+ ,'SERVER_PROTOCOL': self.request_version
+ }
+
+ for http_header, http_value in self.headers.items():
+ env ['HTTP_%s' % http_header.replace('-', '_').upper()] = \
+ http_value
+
+ # Setup the state
+ self.wsgi_sent_headers = 0
+ self.wsgi_headers = []
+
+ try:
+ # We have there environment, now invoke the application
+ result = self.server.app(env, self.wsgi_start_response)
+ try:
+ try:
+ for data in result:
+ if data:
+ self.wsgi_write_data(data)
+ finally:
+ if hasattr(result, 'close'):
+ result.close()
+ except socket.error, socket_err:
+ # Catch common network errors and suppress them
+ if (socket_err.args[0] in \
+ (errno.ECONNABORTED, errno.EPIPE)):
+ return
+ except socket.timeout, socket_timeout:
+ return
+ except:
+ print >> web.debug, traceback.format_exc(),
+
+ if (not self.wsgi_sent_headers):
+ # We must write out something!
+ self.wsgi_write_data(" ")
+ return
+
+ do_POST = run_wsgi_app
+ do_PUT = run_wsgi_app
+ do_DELETE = run_wsgi_app
+
+ def do_GET(self):
+ if self.path.startswith('/static/'):
+ SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
+ else:
+ self.run_wsgi_app()
+
+ def wsgi_start_response(self, response_status, response_headers,
+ exc_info=None):
+ if (self.wsgi_sent_headers):
+ raise Exception \
+ ("Headers already sent and start_response called again!")
+ # Should really take a copy to avoid changes in the application....
+ self.wsgi_headers = (response_status, response_headers)
+ return self.wsgi_write_data
+
+ def wsgi_write_data(self, data):
+ if (not self.wsgi_sent_headers):
+ status, headers = self.wsgi_headers
+ # Need to send header prior to data
+ status_code = status[:status.find(' ')]
+ status_msg = status[status.find(' ') + 1:]
+ self.send_response(int(status_code), status_msg)
+ for header, value in headers:
+ self.send_header(header, value)
+ self.end_headers()
+ self.wsgi_sent_headers = 1
+ # Send the data
+ self.wfile.write(data)
+
+ class WSGIServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
+ def __init__(self, func, server_address):
+ BaseHTTPServer.HTTPServer.__init__(self,
+ server_address,
+ WSGIHandler)
+ self.app = func
+ self.serverShuttingDown = 0
+
+ #print "http://%s:%d/" % server_address
+ WSGIServer(func, server_address).serve_forever()
+
+# The WSGIServer instance.
+# Made global so that it can be stopped in embedded mode.
+server = None
+
+def runsimple(func, server_address=("0.0.0.0", 8080)):
+ """
+ Runs [CherryPy][cp] WSGI server hosting WSGI app `func`.
+ The directory `static/` is hosted statically.
+
+ [cp]: http://www.cherrypy.org
+ """
+ global server
+ func = StaticMiddleware(func)
+ func = LogMiddleware(func)
+
+ server = WSGIServer(server_address, func)
+
+ #if server.ssl_adapter:
+ # print "https://%s:%d/" % server_address
+ #else:
+ # print "http://%s:%d/" % server_address
+
+ try:
+ server.start()
+ except (KeyboardInterrupt, SystemExit):
+ server.stop()
+ server = None
+
+def WSGIServer(server_address, wsgi_app):
+ """Creates CherryPy WSGI server listening at `server_address` to serve `wsgi_app`.
+ This function can be overwritten to customize the webserver or use a different webserver.
+ """
+ import wsgiserver
+
+ # Default values of wsgiserver.ssl_adapters uses cherrypy.wsgiserver
+ # prefix. Overwriting it make it work with web.wsgiserver.
+ wsgiserver.ssl_adapters = {
+ 'builtin': 'web.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
+ 'pyopenssl': 'web.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
+ }
+
+ server = wsgiserver.CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")
+
+ def create_ssl_adapter(cert, key):
+ # wsgiserver tries to import submodules as cherrypy.wsgiserver.foo.
+ # That doesn't work as not it is web.wsgiserver.
+ # Patching sys.modules temporarily to make it work.
+ import types
+ cherrypy = types.ModuleType('cherrypy')
+ cherrypy.wsgiserver = wsgiserver
+ sys.modules['cherrypy'] = cherrypy
+ sys.modules['cherrypy.wsgiserver'] = wsgiserver
+
+ from wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
+ adapter = pyOpenSSLAdapter(cert, key)
+
+ # We are done with our work. Cleanup the patches.
+ del sys.modules['cherrypy']
+ del sys.modules['cherrypy.wsgiserver']
+
+ return adapter
+
+ # SSL backward compatibility
+ if (server.ssl_adapter is None and
+ getattr(server, 'ssl_certificate', None) and
+ getattr(server, 'ssl_private_key', None)):
+ server.ssl_adapter = create_ssl_adapter(server.ssl_certificate, server.ssl_private_key)
+
+ server.nodelay = not sys.platform.startswith('java') # TCP_NODELAY isn't supported on the JVM
+ return server
+
+class StaticApp(SimpleHTTPRequestHandler):
+ """WSGI application for serving static files."""
+ def __init__(self, environ, start_response):
+ self.headers = []
+ self.environ = environ
+ self.start_response = start_response
+
+ def translate_path(self, path):
+ path = urlparse.urlparse(path)[2]
+ path = posixpath.normpath(urllib.unquote(path))
+ words = path.split('/')
+ words = filter(None, words)
+ path = web.config.get('BASEDIR', os.getcwd())
+ for word in words:
+ _, word = os.path.splitdrive(word)
+ _, word = os.path.split(word)
+ if word in (os.curdir, os.pardir): continue
+ path = os.path.join(path, word)
+ return path
+
+ def send_response(self, status, msg=""):
+ self.status = str(status) + " " + msg
+
+ def send_header(self, name, value):
+ self.headers.append((name, value))
+
+ def end_headers(self):
+ pass
+
+ def log_message(*a): pass
+
+ def __iter__(self):
+ environ = self.environ
+
+ self.path = environ.get('PATH_INFO', '')
+ self.client_address = environ.get('REMOTE_ADDR','-'), \
+ environ.get('REMOTE_PORT','-')
+ self.command = environ.get('REQUEST_METHOD', '-')
+
+ from cStringIO import StringIO
+ self.wfile = StringIO() # for capturing error
+
+ try:
+ path = self.translate_path(self.path)
+ etag = '"%s"' % os.path.getmtime(path)
+ client_etag = environ.get('HTTP_IF_NONE_MATCH')
+ self.send_header('ETag', etag)
+ if etag == client_etag:
+ self.send_response(304, "Not Modified")
+ self.start_response(self.status, self.headers)
+ raise StopIteration
+ except OSError:
+ pass # Probably a 404
+
+ f = self.send_head()
+ self.start_response(self.status, self.headers)
+
+ if f:
+ block_size = 16 * 1024
+ while True:
+ buf = f.read(block_size)
+ if not buf:
+ break
+ yield buf
+ f.close()
+ else:
+ value = self.wfile.getvalue()
+ yield value
+
+class StaticMiddleware:
+    """WSGI middleware for serving static files."""
+    def __init__(self, app, prefix='/static/'):
+        self.app = app
+        self.prefix = prefix
+
+    def __call__(self, environ, start_response):
+        path = environ.get('PATH_INFO', '')
+        path = self.normpath(path)
+
+        if path.startswith(self.prefix):
+            # StaticApp is itself a WSGI iterable: it serves the file
+            return StaticApp(environ, start_response)
+        else:
+            return self.app(environ, start_response)
+
+    def normpath(self, path):
+        # collapse '..' and duplicate slashes; re-append the trailing slash
+        # that posixpath.normpath strips
+        path2 = posixpath.normpath(urllib.unquote(path))
+        if path.endswith("/"):
+            path2 += "/"
+        return path2
+
+
+class LogMiddleware:
+    """WSGI middleware for logging the status."""
+    def __init__(self, app):
+        self.app = app
+        # access-log line template: host, time, request triple, status
+        self.format = '%s - - [%s] "%s %s %s" - %s'
+
+        from BaseHTTPServer import BaseHTTPRequestHandler
+        import StringIO
+        f = StringIO.StringIO()
+
+        class FakeSocket:
+            def makefile(self, *a):
+                return f
+
+        # take log_date_time_string method from BaseHTTPRequestHandler
+        # (instantiated with a fake socket so no real I/O happens)
+        self.log_date_time_string = BaseHTTPRequestHandler(FakeSocket(), None, None).log_date_time_string
+
+    def __call__(self, environ, start_response):
+        def xstart_response(status, response_headers, *args):
+            # log once the response status is known
+            out = start_response(status, response_headers, *args)
+            self.log(status, environ)
+            return out
+
+        return self.app(environ, xstart_response)
+
+    def log(self, status, environ):
+        # prefer the WSGI error stream; fall back to web.debug
+        outfile = environ.get('wsgi.errors', web.debug)
+        req = environ.get('PATH_INFO', '_')
+        protocol = environ.get('ACTUAL_SERVER_PROTOCOL', '-')
+        method = environ.get('REQUEST_METHOD', '-')
+        host = "%s:%s" % (environ.get('REMOTE_ADDR','-'),
+                          environ.get('REMOTE_PORT','-'))
+
+        time = self.log_date_time_string()
+
+        msg = self.format % (host, time, protocol, method, req, status)
+        print >> outfile, utils.safestr(msg)
diff --git a/lib/nulib/python/nulib/ext/web/net.py b/lib/nulib/python/nulib/ext/web/net.py
new file mode 100644
index 0000000..b27fcb1
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/net.py
@@ -0,0 +1,244 @@
+"""
+Network Utilities
+(from web.py)
+"""
+
+__all__ = [
+ "validipaddr", "validip6addr", "validipport", "validip", "validaddr",
+ "urlquote",
+ "httpdate", "parsehttpdate",
+ "htmlquote", "htmlunquote", "websafe",
+]
+
+import urllib, time
+try: import datetime
+except ImportError: pass
+import re
+import socket
+
+def validip6addr(address):
+    """
+    Returns True if `address` is a valid IPv6 address.
+
+    >>> validip6addr('::')
+    True
+    >>> validip6addr('aaaa:bbbb:cccc:dddd::1')
+    True
+    >>> validip6addr('1:2:3:4:5:6:7:8:9:10')
+    False
+    >>> validip6addr('12:10')
+    False
+    """
+    try:
+        # Let the C library do the parsing; AttributeError covers platforms
+        # where socket lacks inet_pton (e.g. old Windows builds).
+        socket.inet_pton(socket.AF_INET6, address)
+    except (socket.error, AttributeError):
+        return False
+
+    return True
+
+def validipaddr(address):
+    """
+    Returns True if `address` is a valid IPv4 address.
+
+    >>> validipaddr('192.168.1.1')
+    True
+    >>> validipaddr('192.168.1.800')
+    False
+    >>> validipaddr('192.168.1')
+    False
+    """
+    try:
+        octets = address.split('.')
+        if len(octets) != 4:
+            return False
+        for x in octets:
+            # int() raises ValueError for non-numeric octets -> caught below.
+            if not (0 <= int(x) <= 255):
+                return False
+    except ValueError:
+        return False
+    return True
+
+def validipport(port):
+    """
+    Returns True if `port` is a valid IPv4 port.
+
+    >>> validipport('9000')
+    True
+    >>> validipport('foo')
+    False
+    >>> validipport('1000000')
+    False
+    """
+    try:
+        # int() raises ValueError for non-numeric strings -> caught below.
+        if not (0 <= int(port) <= 65535):
+            return False
+    except ValueError:
+        return False
+    return True
+
+def validip(ip, defaultaddr="0.0.0.0", defaultport=8080):
+ """
+ Returns `(ip_address, port)` from string `ip_addr_port`
+ >>> validip('1.2.3.4')
+ ('1.2.3.4', 8080)
+ >>> validip('80')
+ ('0.0.0.0', 80)
+ >>> validip('192.168.0.1:85')
+ ('192.168.0.1', 85)
+ >>> validip('::')
+ ('::', 8080)
+ >>> validip('[::]:88')
+ ('::', 88)
+ >>> validip('[::1]:80')
+ ('::1', 80)
+
+ """
+ addr = defaultaddr
+ port = defaultport
+
+ #Matt Boswell's code to check for ipv6 first
+ match = re.search(r'^\[([^]]+)\](?::(\d+))?$',ip) #check for [ipv6]:port
+ if match:
+ if validip6addr(match.group(1)):
+ if match.group(2):
+ if validipport(match.group(2)): return (match.group(1),int(match.group(2)))
+ else:
+ return (match.group(1),port)
+ else:
+ if validip6addr(ip): return (ip,port)
+ #end ipv6 code
+
+ ip = ip.split(":", 1)
+ if len(ip) == 1:
+ if not ip[0]:
+ pass
+ elif validipaddr(ip[0]):
+ addr = ip[0]
+ elif validipport(ip[0]):
+ port = int(ip[0])
+ else:
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ elif len(ip) == 2:
+ addr, port = ip
+ if not validipaddr(addr) and validipport(port):
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ port = int(port)
+ else:
+ raise ValueError, ':'.join(ip) + ' is not a valid IP address/port'
+ return (addr, port)
+
+def validaddr(string_):
+    """
+    Returns either (ip_address, port) or "/path/to/socket" from string_
+
+    >>> validaddr('/path/to/socket')
+    '/path/to/socket'
+    >>> validaddr('8000')
+    ('0.0.0.0', 8000)
+    >>> validaddr('127.0.0.1')
+    ('127.0.0.1', 8080)
+    >>> validaddr('127.0.0.1:8000')
+    ('127.0.0.1', 8000)
+    >>> validip('[::1]:80')
+    ('::1', 80)
+    >>> validaddr('fff')
+    Traceback (most recent call last):
+        ...
+    ValueError: fff is not a valid IP address/port
+    """
+    # A '/' anywhere marks the string as a unix-domain socket path.
+    if '/' in string_:
+        return string_
+    else:
+        return validip(string_)
+
+def urlquote(val):
+    """
+    Quotes a string for use in a URL.
+
+    >>> urlquote('://?f=1&j=1')
+    '%3A//%3Ff%3D1%26j%3D1'
+    >>> urlquote(None)
+    ''
+    >>> urlquote(u'\u203d')
+    '%E2%80%BD'
+    """
+    if val is None: return ''
+    # Coerce to a byte string: unicode is UTF-8 encoded, everything else
+    # goes through str() -- urllib.quote only handles byte strings.
+    if not isinstance(val, unicode): val = str(val)
+    else: val = val.encode('utf-8')
+    return urllib.quote(val)
+
+def httpdate(date_obj):
+    """
+    Formats a datetime object for use in HTTP headers (RFC 1123 style,
+    always labelled GMT -- the input is assumed to already be UTC).
+
+    >>> import datetime
+    >>> httpdate(datetime.datetime(1970, 1, 1, 1, 1, 1))
+    'Thu, 01 Jan 1970 01:01:01 GMT'
+    """
+    return date_obj.strftime("%a, %d %b %Y %H:%M:%S GMT")
+
+def parsehttpdate(string_):
+    """
+    Parses an HTTP date into a datetime object.
+    Returns None (rather than raising) when the string does not match.
+
+    >>> parsehttpdate('Thu, 01 Jan 1970 01:01:01 GMT')
+    datetime.datetime(1970, 1, 1, 1, 1, 1)
+    """
+    try:
+        t = time.strptime(string_, "%a, %d %b %Y %H:%M:%S %Z")
+    except ValueError:
+        return None
+    # Drop the sub-second/weekday fields from the struct_time.
+    return datetime.datetime(*t[:6])
+
+def htmlquote(text):
+ r"""
+ Encodes `text` for raw use in HTML.
+
+ >>> htmlquote(u"<'&\">")
+ u'<'&">'
+ """
+ text = text.replace(u"&", u"&") # Must be done first!
+ text = text.replace(u"<", u"<")
+ text = text.replace(u">", u">")
+ text = text.replace(u"'", u"'")
+ text = text.replace(u'"', u""")
+ return text
+
+def htmlunquote(text):
+ r"""
+ Decodes `text` that's HTML quoted.
+
+ >>> htmlunquote(u'<'&">')
+ u'<\'&">'
+ """
+ text = text.replace(u""", u'"')
+ text = text.replace(u"'", u"'")
+ text = text.replace(u">", u">")
+ text = text.replace(u"<", u"<")
+ text = text.replace(u"&", u"&") # Must be done last!
+ return text
+
+def websafe(val):
+ r"""Converts `val` so that it is safe for use in Unicode HTML.
+
+ >>> websafe("<'&\">")
+ u'<'&">'
+ >>> websafe(None)
+ u''
+ >>> websafe(u'\u203d')
+ u'\u203d'
+ >>> websafe('\xe2\x80\xbd')
+ u'\u203d'
+ """
+ if val is None:
+ return u''
+ elif isinstance(val, str):
+ val = val.decode('utf-8')
+ elif not isinstance(val, unicode):
+ val = unicode(val)
+
+ return htmlquote(val)
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/python23.py b/lib/nulib/python/nulib/ext/web/python23.py
new file mode 100644
index 0000000..dfb331a
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/python23.py
@@ -0,0 +1,46 @@
+"""Python 2.3 compatabilty"""
+import threading
+
+class threadlocal(object):
+    """Implementation of threading.local for python2.3.
+
+    Attribute values are kept in a per-thread dict (stored on the Thread
+    object itself), so each thread sees its own independent attributes.
+    """
+    def __getattribute__(self, name):
+        # Redirect __dict__ to the per-thread storage; all other lookups
+        # try the real object first, then fall back to that storage.
+        if name == "__dict__":
+            return threadlocal._getd(self)
+        else:
+            try:
+                return object.__getattribute__(self, name)
+            except AttributeError:
+                try:
+                    return self.__dict__[name]
+                except KeyError:
+                    raise AttributeError, name
+
+    def __setattr__(self, name, value):
+        self.__dict__[name] = value
+
+    def __delattr__(self, name):
+        try:
+            del self.__dict__[name]
+        except KeyError:
+            raise AttributeError, name
+
+    def _getd(self):
+        t = threading.currentThread()
+        if not hasattr(t, '_d'):
+            # using __dict__ of thread as thread local storage
+            t._d = {}
+
+        _id = id(self)
+        # there could be multiple instances of threadlocal.
+        # use id(self) as key
+        if _id not in t._d:
+            t._d[_id] = {}
+        return t._d[_id]
+
+if __name__ == '__main__':
+ d = threadlocal()
+ d.x = 1
+ print d.__dict__
+ print d.x
+
\ No newline at end of file
diff --git a/lib/nulib/python/nulib/ext/web/session.py b/lib/nulib/python/nulib/ext/web/session.py
new file mode 100644
index 0000000..a95c9d5
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/session.py
@@ -0,0 +1,358 @@
+"""
+Session Management
+(from web.py)
+"""
+
+import os, time, datetime, random, base64
+import os.path
+from copy import deepcopy
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+try:
+ import hashlib
+ sha1 = hashlib.sha1
+except ImportError:
+ import sha
+ sha1 = sha.new
+
+import utils
+import webapi as web
+
+__all__ = [
+ 'Session', 'SessionExpired',
+ 'Store', 'DiskStore', 'DBStore',
+]
+
+web.config.session_parameters = utils.storage({
+ 'cookie_name': 'webpy_session_id',
+ 'cookie_domain': None,
+ 'cookie_path' : None,
+ 'timeout': 86400, #24 * 60 * 60, # 24 hours in seconds
+ 'ignore_expiry': True,
+ 'ignore_change_ip': True,
+ 'secret_key': 'fLjUfxqXtfNoIldA0A0J',
+ 'expired_message': 'Session expired',
+ 'httponly': True,
+ 'secure': False
+})
+
+class SessionExpired(web.HTTPError):
+    # Raised when an expired session is accessed; delivered as a normal
+    # 200 response whose body is the configured expired_message.
+    def __init__(self, message):
+        web.HTTPError.__init__(self, '200 OK', {}, data=message)
+
+class Session(object):
+    """Session management for web.py
+    """
+    # __slots__ routes every other attribute through __setattr__ into the
+    # thread-local _data, so one Session instance serves all request threads.
+    __slots__ = [
+        "store", "_initializer", "_last_cleanup_time", "_config", "_data",
+        "__getitem__", "__setitem__", "__delitem__"
+    ]
+
+    def __init__(self, app, store, initializer=None):
+        self.store = store
+        self._initializer = initializer
+        self._last_cleanup_time = 0
+        self._config = utils.storage(web.config.session_parameters)
+        self._data = utils.threadeddict()
+
+        # Expose dict-style access directly on the session object.
+        self.__getitem__ = self._data.__getitem__
+        self.__setitem__ = self._data.__setitem__
+        self.__delitem__ = self._data.__delitem__
+
+        if app:
+            app.add_processor(self._processor)
+
+    def __contains__(self, name):
+        return name in self._data
+
+    def __getattr__(self, name):
+        return getattr(self._data, name)
+
+    def __setattr__(self, name, value):
+        # Real attributes go on the object; everything else is session data.
+        if name in self.__slots__:
+            object.__setattr__(self, name, value)
+        else:
+            setattr(self._data, name, value)
+
+    def __delattr__(self, name):
+        delattr(self._data, name)
+
+    def _processor(self, handler):
+        """Application processor to setup session for every request"""
+        self._cleanup()
+        self._load()
+
+        try:
+            return handler()
+        finally:
+            # Persist the session even if the handler raised.
+            self._save()
+
+    def _load(self):
+        """Load the session from the store, by the id from cookie"""
+        cookie_name = self._config.cookie_name
+        cookie_domain = self._config.cookie_domain
+        cookie_path = self._config.cookie_path
+        httponly = self._config.httponly
+        self.session_id = web.cookies().get(cookie_name)
+
+        # protection against session_id tampering
+        if self.session_id and not self._valid_session_id(self.session_id):
+            self.session_id = None
+
+        self._check_expiry()
+        if self.session_id:
+            d = self.store[self.session_id]
+            self.update(d)
+            self._validate_ip()
+
+        if not self.session_id:
+            # Fresh session: mint an id and seed initial data if configured.
+            self.session_id = self._generate_session_id()
+
+            if self._initializer:
+                if isinstance(self._initializer, dict):
+                    self.update(deepcopy(self._initializer))
+                elif hasattr(self._initializer, '__call__'):
+                    self._initializer()
+
+        self.ip = web.ctx.ip
+
+    def _check_expiry(self):
+        # check for expiry
+        if self.session_id and self.session_id not in self.store:
+            if self._config.ignore_expiry:
+                self.session_id = None
+            else:
+                return self.expired()
+
+    def _validate_ip(self):
+        # check for change of IP
+        if self.session_id and self.get('ip', None) != web.ctx.ip:
+            if not self._config.ignore_change_ip:
+                return self.expired()
+
+    def _save(self):
+        if not self.get('_killed'):
+            self._setcookie(self.session_id)
+            self.store[self.session_id] = dict(self._data)
+        else:
+            # Killed session: expire the cookie immediately.
+            self._setcookie(self.session_id, expires=-1)
+
+    def _setcookie(self, session_id, expires='', **kw):
+        cookie_name = self._config.cookie_name
+        cookie_domain = self._config.cookie_domain
+        cookie_path = self._config.cookie_path
+        httponly = self._config.httponly
+        secure = self._config.secure
+        web.setcookie(cookie_name, session_id, expires=expires, domain=cookie_domain, httponly=httponly, secure=secure, path=cookie_path)
+
+    def _generate_session_id(self):
+        """Generate a random id for session"""
+
+        # Loop until the id is not already present in the store.
+        while True:
+            rand = os.urandom(16)
+            now = time.time()
+            secret_key = self._config.secret_key
+            session_id = sha1("%s%s%s%s" %(rand, now, utils.safestr(web.ctx.ip), secret_key))
+            session_id = session_id.hexdigest()
+            if session_id not in self.store:
+                break
+        return session_id
+
+    def _valid_session_id(self, session_id):
+        # Ids are hex digests; anything else is treated as tampering.
+        rx = utils.re_compile('^[0-9a-fA-F]+$')
+        return rx.match(session_id)
+
+    def _cleanup(self):
+        """Cleanup the stored sessions"""
+        # Run the store-wide cleanup at most once per timeout interval.
+        current_time = time.time()
+        timeout = self._config.timeout
+        if current_time - self._last_cleanup_time > timeout:
+            self.store.cleanup(timeout)
+            self._last_cleanup_time = current_time
+
+    def expired(self):
+        """Called when an expired session is atime"""
+        self._killed = True
+        self._save()
+        raise SessionExpired(self._config.expired_message)
+
+    def kill(self):
+        """Kill the session, make it no longer available"""
+        del self.store[self.session_id]
+        self._killed = True
+
+class Store:
+    """Base class for session stores.
+
+    Subclasses implement a mapping interface keyed by session id; encode
+    and decode provide the shared pickle+base64 serialization.
+    """
+
+    def __contains__(self, key):
+        raise NotImplementedError
+
+    def __getitem__(self, key):
+        raise NotImplementedError
+
+    def __setitem__(self, key, value):
+        raise NotImplementedError
+
+    def cleanup(self, timeout):
+        """removes all the expired sessions"""
+        raise NotImplementedError
+
+    def encode(self, session_dict):
+        """encodes session dict as a string"""
+        pickled = pickle.dumps(session_dict)
+        return base64.encodestring(pickled)
+
+    def decode(self, session_data):
+        """decodes the data to get back the session dict """
+        # NOTE(review): unpickling is only safe because the data comes from
+        # our own store, never directly from the client.
+        pickled = base64.decodestring(session_data)
+        return pickle.loads(pickled)
+
+class DiskStore(Store):
+    """
+    Store for saving a session on disk.
+    One file per session under `root`; expiry uses the file's atime.
+
+    >>> import tempfile
+    >>> root = tempfile.mkdtemp()
+    >>> s = DiskStore(root)
+    >>> s['a'] = 'foo'
+    >>> s['a']
+    'foo'
+    >>> time.sleep(0.01)
+    >>> s.cleanup(0.01)
+    >>> s['a']
+    Traceback (most recent call last):
+        ...
+    KeyError: 'a'
+    """
+    def __init__(self, root):
+        # if the storage root doesn't exists, create it.
+        if not os.path.exists(root):
+            os.makedirs(
+                    os.path.abspath(root)
+                    )
+        self.root = root
+
+    def _get_path(self, key):
+        # Reject keys containing a path separator to prevent traversal
+        # outside the storage root.
+        if os.path.sep in key:
+            raise ValueError, "Bad key: %s" % repr(key)
+        return os.path.join(self.root, key)
+
+    def __contains__(self, key):
+        path = self._get_path(key)
+        return os.path.exists(path)
+
+    def __getitem__(self, key):
+        path = self._get_path(key)
+        if os.path.exists(path):
+            pickled = open(path).read()
+            return self.decode(pickled)
+        else:
+            raise KeyError, key
+
+    def __setitem__(self, key, value):
+        path = self._get_path(key)
+        pickled = self.encode(value)
+        try:
+            f = open(path, 'w')
+            try:
+                f.write(pickled)
+            finally:
+                f.close()
+        except IOError:
+            # Best-effort: a session that cannot be written is silently lost.
+            pass
+
+    def __delitem__(self, key):
+        path = self._get_path(key)
+        if os.path.exists(path):
+            os.remove(path)
+
+    def cleanup(self, timeout):
+        # Remove every session file not accessed within `timeout` seconds.
+        now = time.time()
+        for f in os.listdir(self.root):
+            path = self._get_path(f)
+            atime = os.stat(path).st_atime
+            if now - atime > timeout :
+                os.remove(path)
+
+class DBStore(Store):
+    """Store for saving a session in database
+    Needs a table with the following columns:
+
+        session_id CHAR(128) UNIQUE NOT NULL,
+        atime DATETIME NOT NULL default current_timestamp,
+        data TEXT
+    """
+    def __init__(self, db, table_name):
+        self.db = db
+        self.table = table_name
+
+    def __contains__(self, key):
+        # `$key` is bound by the db layer from vars, not string-interpolated,
+        # so this is not SQL injection.
+        data = self.db.select(self.table, where="session_id=$key", vars=locals())
+        return bool(list(data))
+
+    def __getitem__(self, key):
+        now = datetime.datetime.now()
+        try:
+            s = self.db.select(self.table, where="session_id=$key", vars=locals())[0]
+            # Reading a session refreshes its atime for expiry purposes.
+            self.db.update(self.table, where="session_id=$key", atime=now, vars=locals())
+        except IndexError:
+            raise KeyError
+        else:
+            return self.decode(s.data)
+
+    def __setitem__(self, key, value):
+        pickled = self.encode(value)
+        now = datetime.datetime.now()
+        if key in self:
+            self.db.update(self.table, where="session_id=$key", data=pickled,atime=now, vars=locals())
+        else:
+            self.db.insert(self.table, False, session_id=key, atime=now, data=pickled )
+
+    def __delitem__(self, key):
+        self.db.delete(self.table, where="session_id=$key", vars=locals())
+
+    def cleanup(self, timeout):
+        timeout = datetime.timedelta(timeout/(24.0*60*60)) #timedelta takes numdays as arg
+        last_allowed_time = datetime.datetime.now() - timeout
+        self.db.delete(self.table, where="$last_allowed_time > atime", vars=locals())
+
+class ShelfStore:
+    """Store for saving session using `shelve` module.
+
+    Each shelf entry is an (atime, value) pair so cleanup() can expire
+    entries by last access time.
+
+        import shelve
+        store = ShelfStore(shelve.open('session.shelf'))
+
+    XXX: is shelve thread-safe?
+    """
+    def __init__(self, shelf):
+        self.shelf = shelf
+
+    def __contains__(self, key):
+        return key in self.shelf
+
+    def __getitem__(self, key):
+        atime, v = self.shelf[key]
+        self[key] = v # update atime
+        return v
+
+    def __setitem__(self, key, value):
+        self.shelf[key] = time.time(), value
+
+    def __delitem__(self, key):
+        try:
+            del self.shelf[key]
+        except KeyError:
+            pass
+
+    def cleanup(self, timeout):
+        now = time.time()
+        for k in self.shelf.keys():
+            atime, v = self.shelf[k]
+            if now - atime > timeout :
+                del self[k]
+
+if __name__ == '__main__' :
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/template.py b/lib/nulib/python/nulib/ext/web/template.py
new file mode 100644
index 0000000..ff7d4ef
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/template.py
@@ -0,0 +1,1534 @@
+"""
+simple, elegant templating
+(part of web.py)
+
+Template design:
+
+Template string is split into tokens and the tokens are combined into nodes.
+Parse tree is a nodelist. TextNode and ExpressionNode are simple nodes and
+for-loop, if-loop etc are block nodes, which contain multiple child nodes.
+
+Each node can emit some python string. python string emitted by the
+root node is validated for safeeval and executed using python in the given environment.
+
+Enough care is taken to make sure the generated code and the template has line to line match,
+so that the error messages can point to exact line number in template. (It doesn't work in some cases still.)
+
+Grammar:
+
+ template -> defwith sections
+ defwith -> '$def with (' arguments ')' | ''
+ sections -> section*
+ section -> block | assignment | line
+
+ assignment -> '$ '
+ line -> (text|expr)*
+ text ->
+ expr -> '$' pyexpr | '$(' pyexpr ')' | '${' pyexpr '}'
+ pyexpr ->
+"""
+
+__all__ = [
+ "Template",
+ "Render", "render", "frender",
+ "ParseError", "SecurityError",
+ "test"
+]
+
+import tokenize
+import os
+import sys
+import glob
+import re
+from UserDict import DictMixin
+import warnings
+
+from utils import storage, safeunicode, safestr, re_compile
+from webapi import config
+from net import websafe
+
+def splitline(text):
+    r"""
+    Splits the given text at newline.
+    Returns (first line including its '\n', rest); the second part is ''
+    when there is no newline.
+
+    >>> splitline('foo\nbar')
+    ('foo\n', 'bar')
+    >>> splitline('foo')
+    ('foo', '')
+    >>> splitline('')
+    ('', '')
+    """
+    # find() returns -1 when absent, so index is 0 (falsy) exactly then.
+    index = text.find('\n') + 1
+    if index:
+        return text[:index], text[index:]
+    else:
+        return text, ''
+
+class Parser:
+ """Parser Base.
+ """
+ def __init__(self):
+ self.statement_nodes = STATEMENT_NODES
+ self.keywords = KEYWORDS
+
+ def parse(self, text, name=""):
+ self.text = text
+ self.name = name
+
+ defwith, text = self.read_defwith(text)
+ suite = self.read_suite(text)
+ return DefwithNode(defwith, suite)
+
+ def read_defwith(self, text):
+ if text.startswith('$def with'):
+ defwith, text = splitline(text)
+ defwith = defwith[1:].strip() # strip $ and spaces
+ return defwith, text
+ else:
+ return '', text
+
+ def read_section(self, text):
+ r"""Reads one section from the given text.
+
+ section -> block | assignment | line
+
+ >>> read_section = Parser().read_section
+ >>> read_section('foo\nbar\n')
+ (, 'bar\n')
+ >>> read_section('$ a = b + 1\nfoo\n')
+ (, 'foo\n')
+
+ read_section('$for in range(10):\n hello $i\nfoo)
+ """
+ if text.lstrip(' ').startswith('$'):
+ index = text.index('$')
+ begin_indent, text2 = text[:index], text[index+1:]
+ ahead = self.python_lookahead(text2)
+
+ if ahead == 'var':
+ return self.read_var(text2)
+ elif ahead in self.statement_nodes:
+ return self.read_block_section(text2, begin_indent)
+ elif ahead in self.keywords:
+ return self.read_keyword(text2)
+ elif ahead.strip() == '':
+ # assignments starts with a space after $
+ # ex: $ a = b + 2
+ return self.read_assignment(text2)
+ return self.readline(text)
+
+ def read_var(self, text):
+ r"""Reads a var statement.
+
+ >>> read_var = Parser().read_var
+ >>> read_var('var x=10\nfoo')
+ (, 'foo')
+ >>> read_var('var x: hello $name\nfoo')
+ (, 'foo')
+ """
+ line, text = splitline(text)
+ tokens = self.python_tokens(line)
+ if len(tokens) < 4:
+ raise SyntaxError('Invalid var statement')
+
+ name = tokens[1]
+ sep = tokens[2]
+ value = line.split(sep, 1)[1].strip()
+
+ if sep == '=':
+ pass # no need to process value
+ elif sep == ':':
+ #@@ Hack for backward-compatability
+ if tokens[3] == '\n': # multi-line var statement
+ block, text = self.read_indented_block(text, ' ')
+ lines = [self.readline(x)[0] for x in block.splitlines()]
+ nodes = []
+ for x in lines:
+ nodes.extend(x.nodes)
+ nodes.append(TextNode('\n'))
+ else: # single-line var statement
+ linenode, _ = self.readline(value)
+ nodes = linenode.nodes
+ parts = [node.emit('') for node in nodes]
+ value = "join_(%s)" % ", ".join(parts)
+ else:
+ raise SyntaxError('Invalid var statement')
+ return VarNode(name, value), text
+
+ def read_suite(self, text):
+ r"""Reads section by section till end of text.
+
+ >>> read_suite = Parser().read_suite
+ >>> read_suite('hello $name\nfoo\n')
+ [, ]
+ """
+ sections = []
+ while text:
+ section, text = self.read_section(text)
+ sections.append(section)
+ return SuiteNode(sections)
+
+ def readline(self, text):
+ r"""Reads one line from the text. Newline is supressed if the line ends with \.
+
+ >>> readline = Parser().readline
+ >>> readline('hello $name!\nbye!')
+ (, 'bye!')
+ >>> readline('hello $name!\\\nbye!')
+ (, 'bye!')
+ >>> readline('$f()\n\n')
+ (, '\n')
+ """
+ line, text = splitline(text)
+
+ # supress new line if line ends with \
+ if line.endswith('\\\n'):
+ line = line[:-2]
+
+ nodes = []
+ while line:
+ node, line = self.read_node(line)
+ nodes.append(node)
+
+ return LineNode(nodes), text
+
+ def read_node(self, text):
+ r"""Reads a node from the given text and returns the node and remaining text.
+
+ >>> read_node = Parser().read_node
+ >>> read_node('hello $name')
+ (t'hello ', '$name')
+ >>> read_node('$name')
+ ($name, '')
+ """
+ if text.startswith('$$'):
+ return TextNode('$'), text[2:]
+ elif text.startswith('$#'): # comment
+ line, text = splitline(text)
+ return TextNode('\n'), text
+ elif text.startswith('$'):
+ text = text[1:] # strip $
+ if text.startswith(':'):
+ escape = False
+ text = text[1:] # strip :
+ else:
+ escape = True
+ return self.read_expr(text, escape=escape)
+ else:
+ return self.read_text(text)
+
+ def read_text(self, text):
+ r"""Reads a text node from the given text.
+
+ >>> read_text = Parser().read_text
+ >>> read_text('hello $name')
+ (t'hello ', '$name')
+ """
+ index = text.find('$')
+ if index < 0:
+ return TextNode(text), ''
+ else:
+ return TextNode(text[:index]), text[index:]
+
+ def read_keyword(self, text):
+ line, text = splitline(text)
+ return StatementNode(line.strip() + "\n"), text
+
+ def read_expr(self, text, escape=True):
+ """Reads a python expression from the text and returns the expression and remaining text.
+
+ expr -> simple_expr | paren_expr
+ simple_expr -> id extended_expr
+ extended_expr -> attr_access | paren_expr extended_expr | ''
+ attr_access -> dot id extended_expr
+ paren_expr -> [ tokens ] | ( tokens ) | { tokens }
+
+ >>> read_expr = Parser().read_expr
+ >>> read_expr("name")
+ ($name, '')
+ >>> read_expr("a.b and c")
+ ($a.b, ' and c')
+ >>> read_expr("a. b")
+ ($a, '. b')
+ >>> read_expr("name")
+ ($name, '')
+ >>> read_expr("(limit)ing")
+ ($(limit), 'ing')
+ >>> read_expr('a[1, 2][:3].f(1+2, "weird string[).", 3 + 4) done.')
+ ($a[1, 2][:3].f(1+2, "weird string[).", 3 + 4), ' done.')
+ """
+ def simple_expr():
+ identifier()
+ extended_expr()
+
+ def identifier():
+ tokens.next()
+
+ def extended_expr():
+ lookahead = tokens.lookahead()
+ if lookahead is None:
+ return
+ elif lookahead.value == '.':
+ attr_access()
+ elif lookahead.value in parens:
+ paren_expr()
+ extended_expr()
+ else:
+ return
+
+ def attr_access():
+ from token import NAME # python token constants
+ dot = tokens.lookahead()
+ if tokens.lookahead2().type == NAME:
+ tokens.next() # consume dot
+ identifier()
+ extended_expr()
+
+ def paren_expr():
+ begin = tokens.next().value
+ end = parens[begin]
+ while True:
+ if tokens.lookahead().value in parens:
+ paren_expr()
+ else:
+ t = tokens.next()
+ if t.value == end:
+ break
+ return
+
+ parens = {
+ "(": ")",
+ "[": "]",
+ "{": "}"
+ }
+
+ def get_tokens(text):
+ """tokenize text using python tokenizer.
+ Python tokenizer ignores spaces, but they might be important in some cases.
+ This function introduces dummy space tokens when it identifies any ignored space.
+ Each token is a storage object containing type, value, begin and end.
+ """
+ readline = iter([text]).next
+ end = None
+ for t in tokenize.generate_tokens(readline):
+ t = storage(type=t[0], value=t[1], begin=t[2], end=t[3])
+ if end is not None and end != t.begin:
+ _, x1 = end
+ _, x2 = t.begin
+ yield storage(type=-1, value=text[x1:x2], begin=end, end=t.begin)
+ end = t.end
+ yield t
+
+ class BetterIter:
+ """Iterator like object with 2 support for 2 look aheads."""
+ def __init__(self, items):
+ self.iteritems = iter(items)
+ self.items = []
+ self.position = 0
+ self.current_item = None
+
+ def lookahead(self):
+ if len(self.items) <= self.position:
+ self.items.append(self._next())
+ return self.items[self.position]
+
+ def _next(self):
+ try:
+ return self.iteritems.next()
+ except StopIteration:
+ return None
+
+ def lookahead2(self):
+ if len(self.items) <= self.position+1:
+ self.items.append(self._next())
+ return self.items[self.position+1]
+
+ def next(self):
+ self.current_item = self.lookahead()
+ self.position += 1
+ return self.current_item
+
+ tokens = BetterIter(get_tokens(text))
+
+ if tokens.lookahead().value in parens:
+ paren_expr()
+ else:
+ simple_expr()
+ row, col = tokens.current_item.end
+ return ExpressionNode(text[:col], escape=escape), text[col:]
+
+ def read_assignment(self, text):
+ r"""Reads assignment statement from text.
+
+ >>> read_assignment = Parser().read_assignment
+ >>> read_assignment('a = b + 1\nfoo')
+ (, 'foo')
+ """
+ line, text = splitline(text)
+ return AssignmentNode(line.strip()), text
+
+ def python_lookahead(self, text):
+ """Returns the first python token from the given text.
+
+ >>> python_lookahead = Parser().python_lookahead
+ >>> python_lookahead('for i in range(10):')
+ 'for'
+ >>> python_lookahead('else:')
+ 'else'
+ >>> python_lookahead(' x = 1')
+ ' '
+ """
+ readline = iter([text]).next
+ tokens = tokenize.generate_tokens(readline)
+ return tokens.next()[1]
+
+ def python_tokens(self, text):
+ readline = iter([text]).next
+ tokens = tokenize.generate_tokens(readline)
+ return [t[1] for t in tokens]
+
+ def tabsout(self, line, indent):
+ indent = indent.replace('\t', ' ')
+ re_tabs = re_compile(r'^\t+')
+ mo = re_tabs.match(line)
+ if mo is None:
+ return line, 0
+ else:
+ actual_nbtabs = len(mo.group(0))
+ nbtabs = max(0, actual_nbtabs - len(indent) / 4)
+ return re_compile(r'\t').sub(' ', line, actual_nbtabs), nbtabs
+
+ def tabsin(self, line, nbtabs):
+ if nbtabs > 0: line = re_compile(r' ').sub('\t', line, nbtabs)
+ return line
+
+ def read_indented_block(self, text, indent):
+ r"""Read a block of text. A block is what typically follows a for or it statement.
+ It can be in the same line as that of the statement or an indented block.
+
+ >>> read_indented_block = Parser().read_indented_block
+ >>> read_indented_block(' a\n b\nc', ' ')
+ ('a\nb\n', 'c')
+ >>> read_indented_block(' a\n b\n c\nd', ' ')
+ ('a\n b\nc\n', 'd')
+ >>> read_indented_block(' a\n\n b\nc', ' ')
+ ('a\n\n b\n', 'c')
+ """
+ if indent == '':
+ return '', text
+
+ block = ""
+ while text:
+ oline, text2 = splitline(text)
+ line, nbtabs = self.tabsout(oline, indent)
+ if line.strip() == "":
+ block += '\n'
+ elif line.startswith(indent):
+ line = line[len(indent):]
+ block += self.tabsin(line, nbtabs)
+ else:
+ break
+ text = text2
+ return block, text
+
+ def read_statement(self, text):
+ r"""Reads a python statement.
+
+ >>> read_statement = Parser().read_statement
+ >>> read_statement('for i in range(10): hello $name')
+ ('for i in range(10):', ' hello $name')
+ """
+ tok = PythonTokenizer(text)
+ tok.consume_till(':')
+ return text[:tok.index], text[tok.index:]
+
+ def read_block_section(self, text, begin_indent=''):
+ r"""
+ >>> read_block_section = Parser().read_block_section
+ >>> read_block_section('for i in range(10): hello $i\nfoo')
+ (]>, 'foo')
+ >>> read_block_section('for i in range(10):\n hello $i\n foo', begin_indent=' ')
+ (]>, ' foo')
+ >>> read_block_section('for i in range(10):\n hello $i\nfoo')
+ (]>, 'foo')
+ """
+ line, text = splitline(text)
+ stmt, line = self.read_statement(line)
+ keyword = self.python_lookahead(stmt)
+
+ # if there is some thing left in the line
+ if line.strip():
+ block = line.lstrip()
+ else:
+ def find_indent(text):
+ rx = re_compile(' +')
+ match = rx.match(text)
+ first_indent = match and match.group(0)
+ return first_indent or ""
+
+ # find the indentation of the block by looking at the first line
+ text, nbtabs = self.tabsout(text, begin_indent)
+ first_indent = find_indent(text)[len(begin_indent):]
+
+ #TODO: fix this special case
+ if keyword == "code":
+ indent = begin_indent + first_indent
+ else:
+ indent = begin_indent + min(first_indent, INDENT)
+
+ block, text = self.read_indented_block(text, indent)
+ text = self.tabsin(text, nbtabs)
+
+ return self.create_block_node(keyword, stmt, block, begin_indent), text
+
+ def create_block_node(self, keyword, stmt, block, begin_indent):
+ if keyword in self.statement_nodes:
+ return self.statement_nodes[keyword](stmt, block, begin_indent)
+ else:
+ raise ParseError, 'Unknown statement: %s' % repr(keyword)
+
+class PythonTokenizer:
+    """Utility wrapper over python tokenizer.
+
+    Tracks `index`, the column just past the most recently consumed token,
+    so callers can slice the source text at statement boundaries.
+    """
+    def __init__(self, text):
+        self.text = text
+        readline = iter([text]).next
+        self.tokens = tokenize.generate_tokens(readline)
+        self.index = 0
+
+    def consume_till(self, delim):
+        """Consumes tokens till colon.
+
+        Recurses over bracket pairs so a `delim` nested inside (), [] or {}
+        is not mistaken for the terminator.
+
+        >>> tok = PythonTokenizer('for i in range(10): hello $i')
+        >>> tok.consume_till(':')
+        >>> tok.text[:tok.index]
+        'for i in range(10):'
+        >>> tok.text[tok.index:]
+        ' hello $i'
+        """
+        try:
+            while True:
+                t = self.next()
+                if t.value == delim:
+                    break
+                elif t.value == '(':
+                    self.consume_till(')')
+                elif t.value == '[':
+                    self.consume_till(']')
+                elif t.value == '{':
+                    self.consume_till('}')
+
+                # if end of line is found, it is an exception.
+                # Since there is no easy way to report the line number,
+                # leave the error reporting to the python parser later
+                #@@ This should be fixed.
+                if t.value == '\n':
+                    break
+        except:
+            #raise ParseError, "Expected %s, found end of line." % repr(delim)
+
+            # raising ParseError doesn't show the line number.
+            # if this error is ignored, then it will be caught when compiling the python code.
+            return
+
+    def next(self):
+        # Advance one token; remember its end column in self.index.
+        type, t, begin, end, line = self.tokens.next()
+        row, col = end
+        self.index = col
+        return storage(type=type, value=t, begin=begin, end=end)
+
+class DefwithNode:
+ def __init__(self, defwith, suite):
+ if defwith:
+ self.defwith = defwith.replace('with', '__template__') + ':'
+ # offset 4 lines. for encoding, __lineoffset__, loop and self.
+ self.defwith += "\n __lineoffset__ = -4"
+ else:
+ self.defwith = 'def __template__():'
+ # offset 4 lines for encoding, __template__, __lineoffset__, loop and self.
+ self.defwith += "\n __lineoffset__ = -5"
+
+ self.defwith += "\n loop = ForLoop()"
+ self.defwith += "\n self = TemplateResult(); extend_ = self.extend"
+ self.suite = suite
+ self.end = "\n return self"
+
+ def emit(self, indent):
+ encoding = "# coding: utf-8\n"
+ return encoding + self.defwith + self.suite.emit(indent + INDENT) + self.end
+
+ def __repr__(self):
+ return "" % (self.defwith, self.suite)
+
+class TextNode:
+    # Literal template text; emit() produces a repr'd unicode string
+    # suitable for inclusion in an extend_([...]) call.
+    def __init__(self, value):
+        self.value = value
+
+    def emit(self, indent, begin_indent=''):
+        return repr(safeunicode(self.value))
+
+    def __repr__(self):
+        return 't' + repr(self.value)
+
+class ExpressionNode:
+    # A `$expr` / `$(expr)` / `${expr}` substitution; `escape` is False for
+    # the `$:expr` form.
+    def __init__(self, value, escape=True):
+        self.value = value.strip()
+
+        # convert ${...} to $(...)
+        if value.startswith('{') and value.endswith('}'):
+            self.value = '(' + self.value[1:-1] + ')'
+
+        self.escape = escape
+
+    def emit(self, indent, begin_indent=''):
+        return 'escape_(%s, %s)' % (self.value, bool(self.escape))
+
+    def __repr__(self):
+        if self.escape:
+            escape = ''
+        else:
+            escape = ':'
+        return "$%s%s" % (escape, self.value)
+
+class AssignmentNode:
+ def __init__(self, code):
+ self.code = code
+
+ def emit(self, indent, begin_indent=''):
+ return indent + self.code + "\n"
+
+ def __repr__(self):
+ return "" % repr(self.code)
+
class LineNode:
    """One template line: a sequence of text/expression nodes, emitted as a
    single ``extend_([...])`` call on the template result."""

    def __init__(self, nodes):
        self.nodes = nodes

    def emit(self, indent, text_indent='', name=''):
        text = [node.emit('') for node in self.nodes]
        if text_indent:
            # preserve the line's leading indentation in the output
            text = [repr(text_indent)] + text

        return indent + "extend_([%s])\n" % ", ".join(text)

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<line: %s>" % repr(self.nodes)
+
+INDENT = ' ' # 4 spaces
+
class BlockNode:
    """A compound statement (``$if``, ``$while`` ...) with an indented suite."""

    def __init__(self, stmt, block, begin_indent=''):
        self.stmt = stmt
        self.suite = Parser().read_suite(block)
        self.begin_indent = begin_indent

    def emit(self, indent, text_indent=''):
        text_indent = self.begin_indent + text_indent
        out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
        return out

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<block: %s, %s>" % (repr(self.stmt), repr(self.suite))
+
class ForNode(BlockNode):
    """``$for`` block: wraps the iterable in ``loop.setup(...)`` so templates
    can use the loop.index/loop.parity/... helpers."""

    def __init__(self, stmt, block, begin_indent=''):
        self.original_stmt = stmt
        tok = PythonTokenizer(stmt)
        tok.consume_till('in')
        a = stmt[:tok.index] # for i in
        b = stmt[tok.index:-1] # rest of for stmt excluding :
        stmt = a + ' loop.setup(' + b.strip() + '):'
        BlockNode.__init__(self, stmt, block, begin_indent)

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<block: %s, %s>" % (repr(self.original_stmt), repr(self.suite))
+
class CodeNode:
    """A ``$code:`` block of raw python code embedded in a template."""

    def __init__(self, stmt, block, begin_indent=''):
        # compensate one line for $code:
        self.code = "\n" + block

    def emit(self, indent, text_indent=''):
        import re
        # re-indent every line of the embedded code to the current depth
        rx = re.compile('^', re.M)
        return rx.sub(indent, self.code).rstrip(' ')

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<code: %s>" % repr(self.code)
+
class StatementNode:
    """A simple one-line statement such as ``$pass`` or ``$break``."""

    def __init__(self, stmt):
        self.stmt = stmt

    def emit(self, indent, begin_indent=''):
        return indent + self.stmt

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<stmt: %s>" % repr(self.stmt)
+
class IfNode(BlockNode):
    # ``$if``: plain block node, no extra behaviour needed.
    pass

class ElseNode(BlockNode):
    # ``$else``: plain block node, no extra behaviour needed.
    pass

class ElifNode(BlockNode):
    # ``$elif``: plain block node, no extra behaviour needed.
    pass
+
class DefNode(BlockNode):
    """``$def`` block: a template function definition.

    Wraps the suite so the generated function accumulates and returns its
    own TemplateResult, mirroring what DefwithNode does for the whole
    template.
    """
    def __init__(self, *a, **kw):
        BlockNode.__init__(self, *a, **kw)

        # prepend: create a fresh TemplateResult for this def's output
        code = CodeNode("", "")
        code.code = "self = TemplateResult(); extend_ = self.extend\n"
        self.suite.sections.insert(0, code)

        # append: return the accumulated result
        code = CodeNode("", "")
        code.code = "return self\n"
        self.suite.sections.append(code)

    def emit(self, indent, text_indent=''):
        text_indent = self.begin_indent + text_indent
        out = indent + self.stmt + self.suite.emit(indent + INDENT, text_indent)
        # the helper lines inserted above shift line numbers; compensate
        return indent + "__lineoffset__ -= 3\n" + out
+
class VarNode:
    """``$var name: value`` — stores an attribute on the template result."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def emit(self, indent, text_indent):
        return indent + "self[%s] = %s\n" % (repr(self.name), self.value)

    def __repr__(self):
        # BUG FIX: format string was empty (""), raising TypeError on repr().
        return "<var: %s = %s>" % (self.name, self.value)
+
class SuiteNode:
    """Suite is a list of sections (child nodes emitted in order)."""

    def __init__(self, sections):
        self.sections = sections

    def emit(self, indent, text_indent=''):
        emitted = [section.emit(indent, text_indent) for section in self.sections]
        return "\n" + "".join(emitted)

    def __repr__(self):
        return repr(self.sections)
+
+STATEMENT_NODES = {
+ 'for': ForNode,
+ 'while': BlockNode,
+ 'if': IfNode,
+ 'elif': ElifNode,
+ 'else': ElseNode,
+ 'def': DefNode,
+ 'code': CodeNode
+}
+
+KEYWORDS = [
+ "pass",
+ "break",
+ "continue",
+ "return"
+]
+
+TEMPLATE_BUILTIN_NAMES = [
+ "dict", "enumerate", "float", "int", "bool", "list", "long", "reversed",
+ "set", "slice", "tuple", "xrange",
+ "abs", "all", "any", "callable", "chr", "cmp", "divmod", "filter", "hex",
+ "id", "isinstance", "iter", "len", "max", "min", "oct", "ord", "pow", "range",
+ "True", "False",
+ "None",
+ "__import__", # some c-libraries like datetime requires __import__ to present in the namespace
+]
+
+import __builtin__
+TEMPLATE_BUILTINS = dict([(name, getattr(__builtin__, name)) for name in TEMPLATE_BUILTIN_NAMES if name in __builtin__.__dict__])
+
class ForLoop:
    """
    Wrapper for expression in for statement to support loop.xxx helpers.

    >>> loop = ForLoop()
    >>> for x in loop.setup(['a', 'b', 'c']):
    ...     print loop.index, loop.revindex, loop.parity, x
    ...
    1 3 odd a
    2 2 even b
    3 1 odd c
    >>> loop.index
    Traceback (most recent call last):
        ...
    AttributeError: index
    """
    def __init__(self):
        # current ForLoopContext, or None when no loop is active
        self._ctx = None

    def __getattr__(self, name):
        # delegate loop.index, loop.parity, ... to the active context
        if self._ctx is None:
            raise AttributeError, name
        else:
            return getattr(self._ctx, name)

    def setup(self, seq):
        # push a fresh context (supports nested loops) and iterate through it
        self._push()
        return self._ctx.setup(seq)

    def _push(self):
        self._ctx = ForLoopContext(self, self._ctx)

    def _pop(self):
        # restore the enclosing loop's context
        self._ctx = self._ctx.parent
+
class ForLoopContext:
    """Stackable context for ForLoop to support nested for loops.
    """
    def __init__(self, forloop, parent):
        self._forloop = forloop
        self.parent = parent  # enclosing loop's context, or None

    def setup(self, seq):
        # length is only known for sized sequences; generators report 0,
        # which makes last/revindex meaningless for them (upstream behaviour)
        try:
            self.length = len(seq)
        except:
            self.length = 0

        self.index = 0
        for a in seq:
            self.index += 1
            yield a
        # loop exhausted: pop this context off the ForLoop stack
        self._forloop._pop()

    # 1-based index helpers exposed to templates as loop.xxx
    index0 = property(lambda self: self.index-1)
    first = property(lambda self: self.index == 1)
    last = property(lambda self: self.index == self.length)
    odd = property(lambda self: self.index % 2 == 1)
    even = property(lambda self: self.index % 2 == 0)
    parity = property(lambda self: ['odd', 'even'][self.even])
    revindex0 = property(lambda self: self.length - self.index)
    revindex = property(lambda self: self.length - self.index + 1)
+
class BaseTemplate:
    """Executes generated template code and provides its runtime environment."""

    def __init__(self, code, filename, filter, globals, builtins):
        self.filename = filename
        self.filter = filter  # output filter (e.g. websafe) applied by _escape
        self._globals = globals
        self._builtins = builtins
        if code:
            self.t = self._compile(code)
        else:
            # empty template renders nothing
            self.t = lambda: ''

    def _compile(self, code):
        # execute the generated module code and pull out __template__
        env = self.make_env(self._globals or {}, self._builtins)
        exec(code, env)
        return env['__template__']

    def __call__(self, *a, **kw):
        __hidetraceback__ = True
        return self.t(*a, **kw)

    def make_env(self, globals, builtins):
        # names the generated code relies on (see DefwithNode/LineNode.emit)
        return dict(globals,
            __builtins__=builtins,
            ForLoop=ForLoop,
            TemplateResult=TemplateResult,
            escape_=self._escape,
            join_=self._join
        )
    def _join(self, *items):
        return u"".join(items)

    def _escape(self, value, escape=False):
        # None renders as an empty string; everything else as unicode
        if value is None:
            value = ''

        value = safeunicode(value)
        if escape and self.filter:
            value = self.filter(value)
        return value
+
class Template(BaseTemplate):
    """Compiles template text into a callable, with safety checking."""

    # response content-type chosen from the template file extension
    CONTENT_TYPES = {
        '.html' : 'text/html; charset=utf-8',
        '.xhtml' : 'application/xhtml+xml; charset=utf-8',
        '.txt' : 'text/plain',
    }
    # default output filter per extension (html-escaping for markup types)
    FILTERS = {
        '.html': websafe,
        '.xhtml': websafe,
        '.xml': websafe
    }
    globals = {}

    def __init__(self, text, filename='', filter=None, globals=None, builtins=None, extensions=None):
        self.extensions = extensions or []
        text = Template.normalize_text(text)
        code = self.compile_template(text, filename)

        _, ext = os.path.splitext(filename)
        filter = filter or self.FILTERS.get(ext, None)
        self.content_type = self.CONTENT_TYPES.get(ext, None)

        if globals is None:
            globals = self.globals
        if builtins is None:
            builtins = TEMPLATE_BUILTINS

        BaseTemplate.__init__(self, code=code, filename=filename, filter=filter, globals=globals, builtins=builtins)

    def normalize_text(text):
        """Normalizes template text by correcting \r\n, tabs and BOM chars."""
        text = text.replace('\r\n', '\n').replace('\r', '\n').expandtabs()
        if not text.endswith('\n'):
            text += '\n'

        # ignore BOM chars at the begining of template
        BOM = '\xef\xbb\xbf'
        if isinstance(text, str) and text.startswith(BOM):
            text = text[len(BOM):]

        # support fort \$ for backward-compatibility
        text = text.replace(r'\$', '$$')
        return text
    normalize_text = staticmethod(normalize_text)

    def __call__(self, *a, **kw):
        __hidetraceback__ = True
        import webapi as web
        # set the Content-Type header when running inside a web request
        if 'headers' in web.ctx and self.content_type:
            web.header('Content-Type', self.content_type, unique=True)

        return BaseTemplate.__call__(self, *a, **kw)

    def generate_code(text, filename, parser=None):
        # parse the text
        parser = parser or Parser()
        rootnode = parser.parse(text, filename)

        # generate python code from the parse tree
        code = rootnode.emit(indent="").strip()
        return safestr(code)

    generate_code = staticmethod(generate_code)

    def create_parser(self):
        # wrap the base parser with any registered extensions
        p = Parser()
        for ext in self.extensions:
            p = ext(p)
        return p

    def compile_template(self, template_string, filename):
        code = Template.generate_code(template_string, filename, parser=self.create_parser())

        def get_source_line(filename, lineno):
            # best-effort fetch of the offending source line for diagnostics
            try:
                lines = open(filename).read().splitlines()
                return lines[lineno]
            except:
                return None

        try:
            # compile the code first to report the errors, if any, with the filename
            compiled_code = compile(code, filename, 'exec')
        except SyntaxError, e:
            # display template line that caused the error along with the traceback.
            try:
                e.msg += '\n\nTemplate traceback:\n File %s, line %s\n %s' % \
                    (repr(e.filename), e.lineno, get_source_line(e.filename, e.lineno-1))
            except:
                pass
            raise

        # make sure code is safe - but not with jython, it doesn't have a working compiler module
        if not sys.platform.startswith('java'):
            try:
                import compiler
                ast = compiler.parse(code)
                SafeVisitor().walk(ast, filename)
            except ImportError:
                warnings.warn("Unabled to import compiler module. Unable to check templates for safety.")
        else:
            warnings.warn("SECURITY ISSUE: You are using Jython, which does not support checking templates for safety. Your templates can execute arbitrary code.")

        return compiled_code
+
class CompiledTemplate(Template):
    """Template whose code was pre-compiled to a python function `f`
    (see compile_templates)."""
    def __init__(self, f, filename):
        Template.__init__(self, '', filename)
        self.t = f

    def compile_template(self, *a):
        # nothing to compile; the template function is supplied ready-made
        return None

    def _compile(self, *a):
        return None
+
class Render:
    """The most preferred way of using templates.

    render = web.template.render('templates')
    print render.foo()

    Optional parameter can be `base` can be used to pass output of
    every template through the base template.

    render = web.template.render('templates', base='layout')
    """
    def __init__(self, loc='templates', cache=None, base=None, **keywords):
        self._loc = loc
        self._keywords = keywords

        # default: cache templates except when running in debug mode
        if cache is None:
            cache = not config.get('debug', False)

        if cache:
            self._cache = {}
        else:
            self._cache = None

        if base and not hasattr(base, '__call__'):
            # make base a function, so that it can be passed to sub-renders
            self._base = lambda page: self._template(base)(page)
        else:
            self._base = base

    def _add_global(self, obj, name=None):
        """Add a global to this rendering instance."""
        if 'globals' not in self._keywords: self._keywords['globals'] = {}
        if not name:
            name = obj.__name__
        self._keywords['globals'][name] = obj

    def _lookup(self, name):
        # resolve `name` to either a template file or a sub-directory
        path = os.path.join(self._loc, name)
        if os.path.isdir(path):
            return 'dir', path
        else:
            path = self._findfile(path)
            if path:
                return 'file', path
            else:
                return 'none', None

    def _load_template(self, name):
        kind, path = self._lookup(name)

        if kind == 'dir':
            # sub-directory: nested Render with the same settings
            return Render(path, cache=self._cache is not None, base=self._base, **self._keywords)
        elif kind == 'file':
            return Template(open(path).read(), filename=path, **self._keywords)
        else:
            raise AttributeError, "No template named " + name

    def _findfile(self, path_prefix):
        p = [f for f in glob.glob(path_prefix + '.*') if not f.endswith('~')] # skip backup files
        p.sort() # sort the matches for deterministic order
        return p and p[0]

    def _template(self, name):
        # serve from the cache when caching is enabled
        if self._cache is not None:
            if name not in self._cache:
                self._cache[name] = self._load_template(name)
            return self._cache[name]
        else:
            return self._load_template(name)

    def __getattr__(self, name):
        # render.foo(...) loads templates/foo.*; piped through base when set
        t = self._template(name)
        if self._base and isinstance(t, Template):
            def template(*a, **kw):
                return self._base(t(*a, **kw))
            return template
        else:
            return self._template(name)
+
class GAE_Render(Render):
    """Render variant for Google App Engine, where templates are precompiled
    into a python package (see compile_templates)."""
    # Render gets over-written. make a copy here.
    super = Render
    def __init__(self, loc, *a, **kw):
        GAE_Render.super.__init__(self, loc, *a, **kw)

        import types
        if isinstance(loc, types.ModuleType):
            self.mod = loc
        else:
            # convert a directory path to a dotted package name and import it
            name = loc.rstrip('/').replace('/', '.')
            self.mod = __import__(name, None, None, ['x'])

        # expose builtins/globals to the precompiled template module
        self.mod.__dict__.update(kw.get('builtins', TEMPLATE_BUILTINS))
        self.mod.__dict__.update(Template.globals)
        self.mod.__dict__.update(kw.get('globals', {}))

    def _load_template(self, name):
        t = getattr(self.mod, name)
        import types
        if isinstance(t, types.ModuleType):
            # sub-package: recurse with a nested GAE_Render
            return GAE_Render(t, cache=self._cache is not None, base=self._base, **self._keywords)
        else:
            return t
+
+render = Render
+# setup render for Google App Engine.
+try:
+ from google import appengine
+ render = Render = GAE_Render
+except ImportError:
+ pass
+
def frender(path, **keywords):
    """Creates a template from the given file path.

    `keywords` are passed through to the Template constructor
    (filter, globals, builtins, extensions).
    """
    return Template(open(path).read(), filename=path, **keywords)
+
def compile_templates(root):
    """Compiles templates to python code.

    Walks `root`, compiling every template file into python source and
    writing an importable __init__.py per directory (used by GAE_Render).
    """
    re_start = re_compile('^', re.M)

    for dirpath, dirnames, filenames in os.walk(root):
        # skip hidden files, editor backups and previously generated output
        filenames = [f for f in filenames if not f.startswith('.') and not f.endswith('~') and not f.startswith('__init__.py')]

        for d in dirnames[:]:
            if d.startswith('.'):
                dirnames.remove(d) # don't visit this dir

        out = open(os.path.join(dirpath, '__init__.py'), 'w')
        out.write('from web.template import CompiledTemplate, ForLoop, TemplateResult\n\n')
        if dirnames:
            out.write("import " + ", ".join(dirnames))
            out.write("\n")

        for f in filenames:
            path = os.path.join(dirpath, f)

            # template name = filename without extension
            if '.' in f:
                name, _ = f.split('.', 1)
            else:
                name = f

            text = open(path).read()
            text = Template.normalize_text(text)
            code = Template.generate_code(text, path)

            # rename the generated __template__ function after the file
            code = code.replace("__template__", name, 1)

            out.write(code)

            out.write('\n\n')
            out.write('%s = CompiledTemplate(%s, %s)\n' % (name, name, repr(path)))
            out.write("join_ = %s._join; escape_ = %s._escape\n\n" % (name, name))

            # create template to make sure it compiles
            t = Template(open(path).read(), path)
        out.close()
+
class ParseError(Exception):
    # raised by the template parser on malformed template syntax
    pass

class SecurityError(Exception):
    """The template seems to be trying to do something naughty."""
    pass
+
+# Enumerate all the allowed AST nodes
+ALLOWED_AST_NODES = [
+ "Add", "And",
+# "AssAttr",
+ "AssList", "AssName", "AssTuple",
+# "Assert",
+ "Assign", "AugAssign",
+# "Backquote",
+ "Bitand", "Bitor", "Bitxor", "Break",
+ "CallFunc","Class", "Compare", "Const", "Continue",
+ "Decorators", "Dict", "Discard", "Div",
+ "Ellipsis", "EmptyNode",
+# "Exec",
+ "Expression", "FloorDiv", "For",
+# "From",
+ "Function",
+ "GenExpr", "GenExprFor", "GenExprIf", "GenExprInner",
+ "Getattr",
+# "Global",
+ "If", "IfExp",
+# "Import",
+ "Invert", "Keyword", "Lambda", "LeftShift",
+ "List", "ListComp", "ListCompFor", "ListCompIf", "Mod",
+ "Module",
+ "Mul", "Name", "Not", "Or", "Pass", "Power",
+# "Print", "Printnl", "Raise",
+ "Return", "RightShift", "Slice", "Sliceobj",
+ "Stmt", "Sub", "Subscript",
+# "TryExcept", "TryFinally",
+ "Tuple", "UnaryAdd", "UnarySub",
+ "While", "With", "Yield",
+]
+
class SafeVisitor(object):
    """
    Make sure code is safe by walking through the AST.

    Code considered unsafe if:
    * it has restricted AST nodes
    * it is trying to access resricted attributes

    Adopted from http://www.zafar.se/bkz/uploads/safe.txt (public domain, Babar K. Zafar)
    """
    def __init__(self):
        "Initialize visitor by generating callbacks for all AST node types."
        self.errors = []  # accumulated SecurityError instances

    def walk(self, ast, filename):
        "Validate each node in AST and raise SecurityError if the code is not safe."
        self.filename = filename
        self.visit(ast)

        if self.errors:
            raise SecurityError, '\n'.join([str(err) for err in self.errors])

    def visit(self, node, *args):
        "Recursively validate node and all of its children."
        def classname(obj):
            return obj.__class__.__name__
        nodename = classname(node)
        fn = getattr(self, 'visit' + nodename, None)

        if fn:
            fn(node, *args)
        else:
            # node types without a dedicated visitor must be whitelisted
            if nodename not in ALLOWED_AST_NODES:
                self.fail(node, *args)

        for child in node.getChildNodes():
            self.visit(child, *args)

    def visitName(self, node, *args):
        "Disallow any attempts to access a restricted attr."
        #self.assert_attr(node.getChildren()[0], node)
        pass

    def visitGetattr(self, node, *args):
        "Disallow any attempts to access a restricted attribute."
        self.assert_attr(node.attrname, node)

    def assert_attr(self, attrname, node):
        # record (rather than raise) so all violations are reported at once
        if self.is_unallowed_attr(attrname):
            lineno = self.get_node_lineno(node)
            e = SecurityError("%s:%d - access to attribute '%s' is denied" % (self.filename, lineno, attrname))
            self.errors.append(e)

    def is_unallowed_attr(self, name):
        # underscore / function / bound-method internals enable sandbox escapes
        return name.startswith('_') \
            or name.startswith('func_') \
            or name.startswith('im_')

    def get_node_lineno(self, node):
        return (node.lineno) and node.lineno or 0

    def fail(self, node, *args):
        "Default callback for unallowed AST nodes."
        lineno = self.get_node_lineno(node)
        nodename = node.__class__.__name__
        e = SecurityError("%s:%d - execution of '%s' statements is denied" % (self.filename, lineno, nodename))
        self.errors.append(e)
+
+class TemplateResult(object, DictMixin):
+ """Dictionary like object for storing template output.
+
+ The result of a template execution is usally a string, but sometimes it
+ contains attributes set using $var. This class provides a simple
+ dictionary like interface for storing the output of the template and the
+ attributes. The output is stored with a special key __body__. Convering
+ the the TemplateResult to string or unicode returns the value of __body__.
+
+ When the template is in execution, the output is generated part by part
+ and those parts are combined at the end. Parts are added to the
+ TemplateResult by calling the `extend` method and the parts are combined
+ seemlessly when __body__ is accessed.
+
+ >>> d = TemplateResult(__body__='hello, world', x='foo')
+ >>> d
+
+ >>> print d
+ hello, world
+ >>> d.x
+ 'foo'
+ >>> d = TemplateResult()
+ >>> d.extend([u'hello', u'world'])
+ >>> d
+
+ """
+ def __init__(self, *a, **kw):
+ self.__dict__["_d"] = dict(*a, **kw)
+ self._d.setdefault("__body__", u'')
+
+ self.__dict__['_parts'] = []
+ self.__dict__["extend"] = self._parts.extend
+
+ self._d.setdefault("__body__", None)
+
+ def keys(self):
+ return self._d.keys()
+
+ def _prepare_body(self):
+ """Prepare value of __body__ by joining parts.
+ """
+ if self._parts:
+ value = u"".join(self._parts)
+ self._parts[:] = []
+ body = self._d.get('__body__')
+ if body:
+ self._d['__body__'] = body + value
+ else:
+ self._d['__body__'] = value
+
+ def __getitem__(self, name):
+ if name == "__body__":
+ self._prepare_body()
+ return self._d[name]
+
+ def __setitem__(self, name, value):
+ if name == "__body__":
+ self._prepare_body()
+ return self._d.__setitem__(name, value)
+
+ def __delitem__(self, name):
+ if name == "__body__":
+ self._prepare_body()
+ return self._d.__delitem__(name)
+
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __setattr__(self, key, value):
+ self[key] = value
+
+ def __delattr__(self, key):
+ try:
+ del self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __unicode__(self):
+ self._prepare_body()
+ return self["__body__"]
+
+ def __str__(self):
+ self._prepare_body()
+ return self["__body__"].encode('utf-8')
+
+ def __repr__(self):
+ self._prepare_body()
+ return "" % self._d
+
+def test():
+ r"""Doctest for testing template module.
+
+ Define a utility function to run template test.
+
+ >>> class TestResult:
+ ... def __init__(self, t): self.t = t
+ ... def __getattr__(self, name): return getattr(self.t, name)
+ ... def __repr__(self): return repr(unicode(self))
+ ...
+ >>> def t(code, **keywords):
+ ... tmpl = Template(code, **keywords)
+ ... return lambda *a, **kw: TestResult(tmpl(*a, **kw))
+ ...
+
+ Simple tests.
+
+ >>> t('1')()
+ u'1\n'
+ >>> t('$def with ()\n1')()
+ u'1\n'
+ >>> t('$def with (a)\n$a')(1)
+ u'1\n'
+ >>> t('$def with (a=0)\n$a')(1)
+ u'1\n'
+ >>> t('$def with (a=0)\n$a')(a=1)
+ u'1\n'
+
+ Test complicated expressions.
+
+ >>> t('$def with (x)\n$x.upper()')('hello')
+ u'HELLO\n'
+ >>> t('$(2 * 3 + 4 * 5)')()
+ u'26\n'
+ >>> t('${2 * 3 + 4 * 5}')()
+ u'26\n'
+ >>> t('$def with (limit)\nkeep $(limit)ing.')('go')
+ u'keep going.\n'
+ >>> t('$def with (a)\n$a.b[0]')(storage(b=[1]))
+ u'1\n'
+
+ Test html escaping.
+
+ >>> t('$def with (x)\n$x', filename='a.html')('')
+ u'<html>\n'
+ >>> t('$def with (x)\n$x', filename='a.txt')('')
+ u'\n'
+
+ Test if, for and while.
+
+ >>> t('$if 1: 1')()
+ u'1\n'
+ >>> t('$if 1:\n 1')()
+ u'1\n'
+ >>> t('$if 1:\n 1\\')()
+ u'1'
+ >>> t('$if 0: 0\n$elif 1: 1')()
+ u'1\n'
+ >>> t('$if 0: 0\n$elif None: 0\n$else: 1')()
+ u'1\n'
+ >>> t('$if 0 < 1 and 1 < 2: 1')()
+ u'1\n'
+ >>> t('$for x in [1, 2, 3]: $x')()
+ u'1\n2\n3\n'
+ >>> t('$def with (d)\n$for k, v in d.iteritems(): $k')({1: 1})
+ u'1\n'
+ >>> t('$for x in [1, 2, 3]:\n\t$x')()
+ u' 1\n 2\n 3\n'
+ >>> t('$def with (a)\n$while a and a.pop():1')([1, 2, 3])
+ u'1\n1\n1\n'
+
+ The space after : must be ignored.
+
+ >>> t('$if True: foo')()
+ u'foo\n'
+
+ Test loop.xxx.
+
+ >>> t("$for i in range(5):$loop.index, $loop.parity")()
+ u'1, odd\n2, even\n3, odd\n4, even\n5, odd\n'
+ >>> t("$for i in range(2):\n $for j in range(2):$loop.parent.parity $loop.parity")()
+ u'odd odd\nodd even\neven odd\neven even\n'
+
+ Test assignment.
+
+ >>> t('$ a = 1\n$a')()
+ u'1\n'
+ >>> t('$ a = [1]\n$a[0]')()
+ u'1\n'
+ >>> t('$ a = {1: 1}\n$a.keys()[0]')()
+ u'1\n'
+ >>> t('$ a = []\n$if not a: 1')()
+ u'1\n'
+ >>> t('$ a = {}\n$if not a: 1')()
+ u'1\n'
+ >>> t('$ a = -1\n$a')()
+ u'-1\n'
+ >>> t('$ a = "1"\n$a')()
+ u'1\n'
+
+ Test comments.
+
+ >>> t('$# 0')()
+ u'\n'
+ >>> t('hello$#comment1\nhello$#comment2')()
+ u'hello\nhello\n'
+ >>> t('$#comment0\nhello$#comment1\nhello$#comment2')()
+ u'\nhello\nhello\n'
+
+ Test unicode.
+
+ >>> t('$def with (a)\n$a')(u'\u203d')
+ u'\u203d\n'
+ >>> t('$def with (a)\n$a')(u'\u203d'.encode('utf-8'))
+ u'\u203d\n'
+ >>> t(u'$def with (a)\n$a $:a')(u'\u203d')
+ u'\u203d \u203d\n'
+ >>> t(u'$def with ()\nfoo')()
+ u'foo\n'
+ >>> def f(x): return x
+ ...
+ >>> t(u'$def with (f)\n$:f("x")')(f)
+ u'x\n'
+ >>> t('$def with (f)\n$:f("x")')(f)
+ u'x\n'
+
+ Test dollar escaping.
+
+ >>> t("Stop, $$money isn't evaluated.")()
+ u"Stop, $money isn't evaluated.\n"
+ >>> t("Stop, \$money isn't evaluated.")()
+ u"Stop, $money isn't evaluated.\n"
+
+ Test space sensitivity.
+
+ >>> t('$def with (x)\n$x')(1)
+ u'1\n'
+ >>> t('$def with(x ,y)\n$x')(1, 1)
+ u'1\n'
+ >>> t('$(1 + 2*3 + 4)')()
+ u'11\n'
+
+ Make sure globals are working.
+
+ >>> t('$x')()
+ Traceback (most recent call last):
+ ...
+ NameError: global name 'x' is not defined
+ >>> t('$x', globals={'x': 1})()
+ u'1\n'
+
+ Can't change globals.
+
+ >>> t('$ x = 2\n$x', globals={'x': 1})()
+ u'2\n'
+ >>> t('$ x = x + 1\n$x', globals={'x': 1})()
+ Traceback (most recent call last):
+ ...
+ UnboundLocalError: local variable 'x' referenced before assignment
+
+ Make sure builtins are customizable.
+
+ >>> t('$min(1, 2)')()
+ u'1\n'
+ >>> t('$min(1, 2)', builtins={})()
+ Traceback (most recent call last):
+ ...
+ NameError: global name 'min' is not defined
+
+ Test vars.
+
+ >>> x = t('$var x: 1')()
+ >>> x.x
+ u'1'
+ >>> x = t('$var x = 1')()
+ >>> x.x
+ 1
+ >>> x = t('$var x: \n foo\n bar')()
+ >>> x.x
+ u'foo\nbar\n'
+
+ Test BOM chars.
+
+ >>> t('\xef\xbb\xbf$def with(x)\n$x')('foo')
+ u'foo\n'
+
+ Test for with weird cases.
+
+ >>> t('$for i in range(10)[1:5]:\n $i')()
+ u'1\n2\n3\n4\n'
+ >>> t("$for k, v in {'a': 1, 'b': 2}.items():\n $k $v")()
+ u'a 1\nb 2\n'
+ >>> t("$for k, v in ({'a': 1, 'b': 2}.items():\n $k $v")()
+ Traceback (most recent call last):
+ ...
+ SyntaxError: invalid syntax
+
+ Test datetime.
+
+ >>> import datetime
+ >>> t("$def with (date)\n$date.strftime('%m %Y')")(datetime.datetime(2009, 1, 1))
+ u'01 2009\n'
+ """
+ pass
+
+if __name__ == "__main__":
+ import sys
+ if '--compile' in sys.argv:
+ compile_templates(sys.argv[2])
+ else:
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/test.py b/lib/nulib/python/nulib/ext/web/test.py
new file mode 100644
index 0000000..a942a91
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/test.py
@@ -0,0 +1,51 @@
+"""test utilities
+(part of web.py)
+"""
+import unittest
+import sys, os
+import web
+
+TestCase = unittest.TestCase
+TestSuite = unittest.TestSuite
+
def load_modules(names):
    """Import each dotted name and return the list of module objects."""
    modules = []
    for name in names:
        modules.append(__import__(name, None, None, "x"))
    return modules
+
def module_suite(module, classnames=None):
    """Makes a suite from a module.

    Explicit class names win; then a module-level suite() hook; otherwise
    everything discoverable in the module.
    """
    loader = unittest.TestLoader()
    if classnames:
        return loader.loadTestsFromNames(classnames, module)
    if hasattr(module, 'suite'):
        return module.suite()
    return loader.loadTestsFromModule(module)
+
def doctest_suite(module_names):
    """Makes a test suite from doctests."""
    import doctest
    result = TestSuite()
    for mod in load_modules(module_names):
        result.addTest(doctest.DocTestSuite(mod))
    return result
+
def suite(module_names):
    """Creates a suite from multiple modules."""
    combined = TestSuite()
    for mod in load_modules(module_names):
        combined.addTest(module_suite(mod))
    return combined
+
def runTests(suite):
    """Run `suite` with the default text runner and return its result."""
    return unittest.TextTestRunner().run(suite)
+
def main(suite=None):
    """Run `suite` (or tests discovered in __main__) and exit with 0/1."""
    if not suite:
        main_module = __import__('__main__')
        # allow command line switches
        args = [a for a in sys.argv[1:] if not a.startswith('-')]
        suite = module_suite(main_module, args or None)

    result = runTests(suite)
    # exit status 0 on success, 1 on failure
    sys.exit(not result.wasSuccessful())
+
diff --git a/lib/nulib/python/nulib/ext/web/utils.py b/lib/nulib/python/nulib/ext/web/utils.py
new file mode 100755
index 0000000..f65a60d
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/utils.py
@@ -0,0 +1,1547 @@
+#!/usr/bin/env python
+"""
+General Utilities
+(part of web.py)
+"""
+
+__all__ = [
+ "Storage", "storage", "storify",
+ "Counter", "counter",
+ "iters",
+ "rstrips", "lstrips", "strips",
+ "safeunicode", "safestr", "utf8",
+ "TimeoutError", "timelimit",
+ "Memoize", "memoize",
+ "re_compile", "re_subm",
+ "group", "uniq", "iterview",
+ "IterBetter", "iterbetter",
+ "safeiter", "safewrite",
+ "dictreverse", "dictfind", "dictfindall", "dictincr", "dictadd",
+ "requeue", "restack",
+ "listget", "intget", "datestr",
+ "numify", "denumify", "commify", "dateify",
+ "nthstr", "cond",
+ "CaptureStdout", "capturestdout", "Profile", "profile",
+ "tryall",
+ "ThreadedDict", "threadeddict",
+ "autoassign",
+ "to36",
+ "safemarkdown",
+ "sendmail"
+]
+
+import re, sys, time, threading, itertools, traceback, os
+
+try:
+ import subprocess
+except ImportError:
+ subprocess = None
+
+try: import datetime
+except ImportError: pass
+
+try: set
+except NameError:
+ from sets import Set as set
+
+try:
+ from threading import local as threadlocal
+except ImportError:
+ from python23 import threadlocal
+
+class Storage(dict):
+ """
+ A Storage object is like a dictionary except `obj.foo` can be used
+ in addition to `obj['foo']`.
+
+ >>> o = storage(a=1)
+ >>> o.a
+ 1
+ >>> o['a']
+ 1
+ >>> o.a = 2
+ >>> o['a']
+ 2
+ >>> del o.a
+ >>> o.a
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'a'
+
+ """
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __setattr__(self, key, value):
+ self[key] = value
+
+ def __delattr__(self, key):
+ try:
+ del self[key]
+ except KeyError, k:
+ raise AttributeError, k
+
+ def __repr__(self):
+ return ''
+
+storage = Storage
+
def storify(mapping, *requireds, **defaults):
    """
    Creates a `storage` object from dictionary `mapping`, raising `KeyError` if
    d doesn't have all of the keys in `requireds` and using the default
    values for keys found in `defaults`.

    For example, `storify({'a':1, 'c':3}, b=2, c=0)` will return the equivalent of
    `storage({'a':1, 'b':2, 'c':3})`.

    If a `storify` value is a list (e.g. multiple values in a form submission),
    `storify` returns the last element of the list, unless the key appears in
    `defaults` as a list. Thus:

    >>> storify({'a':[1, 2]}).a
    2
    >>> storify({'a':[1, 2]}, a=[]).a
    [1, 2]
    >>> storify({'a':1}, a=[]).a
    [1]
    >>> storify({}, a=[]).a
    []

    Similarly, if the value has a `value` attribute, `storify will return _its_
    value, unless the key appears in `defaults` as a dictionary.

    >>> storify({'a':storage(value=1)}).a
    1
    >>> storify({'a':storage(value=1)}, a={}).a
    <Storage {'value': 1}>
    >>> storify({}, a={}).a
    {}

    Optionally, keyword parameter `_unicode` can be passed to convert all values to unicode.

    >>> storify({'x': 'a'}, _unicode=True)
    <Storage {'x': u'a'}>
    >>> storify({'x': storage(value='a')}, x={}, _unicode=True)
    <Storage {'x': <Storage {'value': 'a'}>}>
    >>> storify({'x': storage(value='a')}, _unicode=True)
    <Storage {'x': 'a'}>
    """
    _unicode = defaults.pop('_unicode', False)

    # if _unicode is callable object, use it convert a string to unicode.
    to_unicode = safeunicode
    if _unicode is not False and hasattr(_unicode, "__call__"):
        to_unicode = _unicode

    def unicodify(s):
        if _unicode and isinstance(s, str): return to_unicode(s)
        else: return s

    def getvalue(x):
        # file uploads keep their raw value; others unwrap .value / unicodify
        if hasattr(x, 'file') and hasattr(x, 'value'):
            return x.value
        elif hasattr(x, 'value'):
            return unicodify(x.value)
        else:
            return unicodify(x)

    stor = Storage()
    for key in requireds + tuple(mapping.keys()):
        value = mapping[key]
        if isinstance(value, list):
            if isinstance(defaults.get(key), list):
                value = [getvalue(x) for x in value]
            else:
                # multiple form values: keep only the last one
                value = value[-1]
        if not isinstance(defaults.get(key), dict):
            value = getvalue(value)
        if isinstance(defaults.get(key), list) and not isinstance(value, list):
            value = [value]
        setattr(stor, key, value)

    for (key, value) in defaults.iteritems():
        result = value
        if hasattr(stor, key):
            result = stor[key]
        if value == () and not isinstance(result, tuple):
            result = (result,)
        setattr(stor, key, result)

    return stor
+
class Counter(storage):
    """Keeps count of how many times something is added.

    >>> c = counter()
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('x')
    >>> c.add('y')
    >>> c
    <Counter {'y': 1, 'x': 5}>
    >>> c.most()
    ['x']
    """
    def add(self, n):
        self.setdefault(n, 0)
        self[n] += 1

    def most(self):
        """Returns the keys with maximum count."""
        m = max(self.itervalues())
        return [k for k, v in self.iteritems() if v == m]

    def least(self):
        """Returns the keys with mininum count."""
        m = min(self.itervalues())
        return [k for k, v in self.iteritems() if v == m]

    def percent(self, key):
        """Returns what percentage a certain key is of all entries.

        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.percent('x')
        0.75
        >>> c.percent('y')
        0.25
        """
        return float(self[key])/sum(self.values())

    def sorted_keys(self):
        """Returns keys sorted by value.

        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_keys()
        ['x', 'y']
        """
        return sorted(self.keys(), key=lambda k: self[k], reverse=True)

    def sorted_values(self):
        """Returns values sorted by value.

        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_values()
        [2, 1]
        """
        return [self[k] for k in self.sorted_keys()]

    def sorted_items(self):
        """Returns items sorted by value.

        >>> c = counter()
        >>> c.add('x')
        >>> c.add('x')
        >>> c.add('y')
        >>> c.sorted_items()
        [('x', 2), ('y', 1)]
        """
        return [(k, self[k]) for k in self.sorted_keys()]

    def __repr__(self):
        # BUG FIX: repr was the empty string, hiding the counts;
        # restored per upstream web.py.
        return '<Counter ' + dict.__repr__(self) + '>'

counter = Counter
+
iters = [list, tuple]
import __builtin__
if hasattr(__builtin__, 'set'):
    iters.append(set)
if hasattr(__builtin__, 'frozenset'):
    # BUG FIX: this branch appended `set` a second time; the check is for
    # frozenset, so frozenset is what must be registered as an iterable type.
    iters.append(frozenset)
if sys.version_info < (2,6): # sets module deprecated in 2.6
    try:
        from sets import Set
        iters.append(Set)
    except ImportError:
        pass

class _hack(tuple): pass
iters = _hack(iters)
iters.__doc__ = """
A list of iterable items (like lists, but not strings). Includes whichever
of lists, tuples, sets, and Sets are available in this version of Python.
"""
+
def _strips(direction, text, remove):
    # `remove` may be a single string or an iterable of strings (see `iters`);
    # an iterable is applied element by element, in order.
    if isinstance(remove, iters):
        for subr in remove:
            text = _strips(direction, text, subr)
        return text

    # 'l' strips a matching prefix, 'r' a matching suffix; at most one copy.
    if direction == 'l':
        if text.startswith(remove):
            return text[len(remove):]
    elif direction == 'r':
        if text.endswith(remove):
            return text[:-len(remove)]
    else:
        raise ValueError, "Direction needs to be r or l."
    return text
+
def rstrips(text, remove):
    """
    Removes the string `remove` (or each of an iterable of strings)
    from the right of `text`.

    >>> rstrips("foobar", "bar")
    'foo'

    """
    return _strips('r', text, remove)
+
def lstrips(text, remove):
    """
    Removes the string `remove` (or each of an iterable of strings,
    applied in order) from the left of `text`.

    >>> lstrips("foobar", "foo")
    'bar'
    >>> lstrips('http://foo.org/', ['http://', 'https://'])
    'foo.org/'
    >>> lstrips('FOOBARBAZ', ['FOO', 'BAR'])
    'BAZ'
    >>> lstrips('FOOBARBAZ', ['BAR', 'FOO'])
    'BARBAZ'

    """
    return _strips('l', text, remove)
+
def strips(text, remove):
    """
    Removes the string `remove` from both sides of `text`.

    >>> strips("foobarfoo", "foo")
    'bar'

    """
    return rstrips(lstrips(text, remove), remove)
+
def safeunicode(obj, encoding='utf-8'):
    r"""
    Converts any given object to unicode string (Python 2 `unicode`).

    >>> safeunicode('hello')
    u'hello'
    >>> safeunicode(2)
    u'2'
    >>> safeunicode('\xe1\x88\xb4')
    u'\u1234'
    """
    t = type(obj)
    if t is unicode:
        return obj
    elif t is str:
        # byte strings are decoded with the given encoding
        return obj.decode(encoding)
    elif t in [int, float, bool]:
        return unicode(obj)
    elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
        # unicode subclasses and objects providing __unicode__
        return unicode(obj)
    else:
        # fall back to str() and decode the result
        return str(obj).decode(encoding)
+
def safestr(obj, encoding='utf-8'):
    r"""
    Converts any given object to utf-8 encoded byte string.

    >>> safestr('hello')
    'hello'
    >>> safestr(u'\u1234')
    '\xe1\x88\xb4'
    >>> safestr(2)
    '2'
    """
    if isinstance(obj, unicode):
        return obj.encode(encoding)
    elif isinstance(obj, str):
        return obj
    elif hasattr(obj, 'next'): # iterator
        # NOTE: for iterators this returns a *lazy* imap object, not a str.
        return itertools.imap(safestr, obj)
    else:
        return str(obj)
+
+# for backward-compatibility
+utf8 = safestr
+
class TimeoutError(Exception): pass
def timelimit(timeout):
    """
    A decorator to limit a function to `timeout` seconds, raising `TimeoutError`
    if it takes longer.

    >>> import time
    >>> def meaningoflife():
    ...     time.sleep(.2)
    ...     return 42
    >>>
    >>> timelimit(.1)(meaningoflife)()
    Traceback (most recent call last):
        ...
    TimeoutError: took too long
    >>> timelimit(1)(meaningoflife)()
    42

    _Caveat:_ The function isn't stopped after `timeout` seconds but continues
    executing in a separate thread. (There seems to be no way to kill a thread.)

    inspired by
    """
    def _1(function):
        def _2(*args, **kw):
            class Dispatch(threading.Thread):
                def __init__(self):
                    threading.Thread.__init__(self)
                    self.result = None
                    self.error = None

                    # daemon so a still-running call can't block interpreter exit
                    self.setDaemon(True)
                    self.start()

                def run(self):
                    try:
                        self.result = function(*args, **kw)
                    except:
                        self.error = sys.exc_info()

            c = Dispatch()
            c.join(timeout)
            if c.isAlive():
                raise TimeoutError, 'took too long'
            if c.error:
                # re-raise the worker's exception in the caller's thread (py2 form)
                raise c.error[0], c.error[1]
            return c.result
        return _2
    return _1
+
class Memoize:
    """
    'Memoizes' a function, caching its return values for each input.
    If `expires` is specified, values are recalculated after `expires` seconds.
    If `background` is specified, values are recalculated in a separate thread.

    >>> calls = 0
    >>> def howmanytimeshaveibeencalled():
    ...     global calls
    ...     calls += 1
    ...     return calls
    >>> fastcalls = memoize(howmanytimeshaveibeencalled)
    >>> howmanytimeshaveibeencalled()
    1
    >>> howmanytimeshaveibeencalled()
    2
    >>> fastcalls()
    3
    >>> fastcalls()
    3
    >>> import time
    >>> fastcalls = memoize(howmanytimeshaveibeencalled, .1, background=False)
    >>> fastcalls()
    4
    >>> fastcalls()
    4
    >>> time.sleep(.2)
    >>> fastcalls()
    5
    >>> def slowfunc():
    ...     time.sleep(.1)
    ...     return howmanytimeshaveibeencalled()
    >>> fastcalls = memoize(slowfunc, .2, background=True)
    >>> fastcalls()
    6
    >>> timelimit(.05)(fastcalls)()
    6
    >>> time.sleep(.2)
    >>> timelimit(.05)(fastcalls)()
    6
    >>> timelimit(.05)(fastcalls)()
    6
    >>> time.sleep(.2)
    >>> timelimit(.05)(fastcalls)()
    7
    >>> fastcalls = memoize(slowfunc, None, background=True)
    >>> threading.Thread(target=fastcalls).start()
    >>> time.sleep(.01)
    >>> fastcalls()
    9
    """
    def __init__(self, func, expires=None, background=True):
        self.func = func
        self.cache = {}        # key -> (value, timestamp)
        self.expires = expires
        self.background = background
        self.running = {}      # key -> Lock guarding recomputation

    def __call__(self, *args, **keywords):
        # NOTE(review): keys require hashable args; kwargs are ordered as
        # given by dict.items(), so two call orders may produce distinct keys.
        key = (args, tuple(keywords.items()))
        # NOTE(review): this check-then-create of the per-key lock is not
        # itself locked; two threads can race here — verify acceptable.
        if not self.running.get(key):
            self.running[key] = threading.Lock()
        def update(block=False):
            if self.running[key].acquire(block):
                try:
                    self.cache[key] = (self.func(*args, **keywords), time.time())
                finally:
                    self.running[key].release()

        if key not in self.cache:
            # first call: compute synchronously
            update(block=True)
        elif self.expires and (time.time() - self.cache[key][1]) > self.expires:
            # stale: refresh, possibly in the background while serving old value
            if self.background:
                threading.Thread(target=update).start()
            else:
                update()
        return self.cache[key][0]
+
memoize = Memoize

# Cache compiled regexes by pattern string.
re_compile = memoize(re.compile) #@@ threadsafe?
re_compile.__doc__ = """
A memoized version of re.compile.
"""
+
class _re_subm_proxy:
    # Replacement callable that records the last match object while
    # substituting the empty string (used by re_subm below).
    def __init__(self):
        self.match = None
    def __call__(self, match):
        self.match = match
        return ''
+
def re_subm(pat, repl, string):
    """
    Like re.sub, but returns the replacement _and_ the match object.

    >>> t, m = re_subm('g(oo+)fball', r'f\\1lish', 'goooooofball')
    >>> t
    'foooooolish'
    >>> m.groups()
    ('oooooo',)
    """
    compiled_pat = re_compile(pat)
    # First pass only captures the match object; second pass does the real sub.
    proxy = _re_subm_proxy()
    compiled_pat.sub(proxy.__call__, string)
    return compiled_pat.sub(repl, string), proxy.match
+
def group(seq, size):
    """
    Returns an iterator over a series of lists of length size from iterable.

    >>> list(group([1,2,3,4], 2))
    [[1, 2], [3, 4]]
    >>> list(group([1,2,3,4,5], 2))
    [[1, 2], [3, 4], [5]]
    """
    # Python 2 idiom: seq.next()/StopIteration ends `take` early, so the last
    # chunk may be shorter than `size`.
    def take(seq, n):
        for i in xrange(n):
            yield seq.next()

    if not hasattr(seq, 'next'):
        seq = iter(seq)
    while True:
        x = list(take(seq, size))
        if x:
            yield x
        else:
            break
+
def uniq(seq, key=None):
    """
    Removes duplicate elements from a list while preserving the order of the rest.

    >>> uniq([9,0,2,1,0])
    [9, 0, 2, 1]

    The value of the optional `key` parameter should be a function that
    takes a single argument and returns a key to test the uniqueness.

    >>> uniq(["Foo", "foo", "bar"], key=lambda s: s.lower())
    ['Foo', 'bar']
    """
    key = key or (lambda value: value)
    seen = set()
    kept = []
    for item in seq:
        marker = key(item)
        if marker not in seen:
            seen.add(marker)
            kept.append(item)
    return kept
+
def iterview(x):
    """
    Takes an iterable `x` and returns an iterator over it
    which prints its progress to stderr as it iterates through.

    Requires len(x), so `x` must be a sized iterable.
    """
    WIDTH = 70  # total width of the progress line

    def plainformat(n, lenx):
        # e.g. ' 42.0% ( 42/100)'
        return '%5.1f%% (%*d/%d)' % ((float(n)/lenx)*100, len(str(lenx)), n, lenx)

    def bars(size, n, lenx):
        # '[====>   ]' style bar, `size` chars wide
        val = int((float(n)*size)/lenx + 0.5)
        if size - val:
            spacing = ">" + (" "*(size-val))[1:]
        else:
            spacing = ""
        return "[%s%s]" % ("="*val, spacing)

    def eta(elapsed, n, lenx):
        # estimated remaining time as HH:MM:SS (elapsed time when finished)
        if n == 0:
            return '--:--:--'
        if n == lenx:
            secs = int(elapsed)
        else:
            secs = int((elapsed/n) * (lenx-n))
        mins, secs = divmod(secs, 60)
        hrs, mins = divmod(mins, 60)

        return '%02d:%02d:%02d' % (hrs, mins, secs)

    def format(starttime, n, lenx):
        out = plainformat(n, lenx) + ' '
        if n == lenx:
            end = ' '
        else:
            end = ' ETA '
        end += eta(time.time() - starttime, n, lenx)
        out += bars(WIDTH - len(out) - len(end), n, lenx)
        out += end
        return out

    starttime = time.time()
    lenx = len(x)
    for n, y in enumerate(x):
        # '\r' redraws the same terminal line each step
        sys.stderr.write('\r' + format(starttime, n, lenx))
        yield y
    sys.stderr.write('\r' + format(starttime, n+1, lenx) + '\n')
+
class IterBetter:
    """
    Returns an object that can be used as an iterator
    but can also be used via __getitem__ (although it
    cannot go backwards -- that is, you cannot request
    `iterbetter[0]` after requesting `iterbetter[1]`).

    >>> import itertools
    >>> c = iterbetter(itertools.count())
    >>> c[1]
    1
    >>> c[5]
    5
    >>> c[3]
    Traceback (most recent call last):
        ...
    IndexError: already passed 3

    It is also possible to get the first value of the iterator or None.

    >>> c = iterbetter(iter([3, 4, 5]))
    >>> print c.first()
    3
    >>> c = iterbetter(iter([]))
    >>> print c.first()
    None

    For boolean test, IterBetter peeks at the first value in the iterator
    without affecting the iteration.

    >>> c = iterbetter(iter(range(5)))
    >>> bool(c)
    True
    >>> list(c)
    [0, 1, 2, 3, 4]
    >>> c = iterbetter(iter([]))
    >>> bool(c)
    False
    >>> list(c)
    []
    """
    def __init__(self, iterator):
        # self.i: wrapped iterator; self.c: count of items already consumed
        self.i, self.c = iterator, 0

    def first(self, default=None):
        """Returns the first element of the iterator or None when there are no
        elements.

        If the optional argument default is specified, that is returned instead
        of None when there are no elements.
        """
        try:
            return iter(self).next()
        except StopIteration:
            return default

    def __iter__(self):
        # yield the value cached by __nonzero__, if any, before the rest
        if hasattr(self, "_head"):
            yield self._head

        while 1:
            yield self.i.next()
            self.c += 1

    def __getitem__(self, i):
        #todo: slices
        # forward-only: indices already consumed raise IndexError
        if i < self.c:
            raise IndexError, "already passed "+str(i)
        try:
            while i > self.c:
                self.i.next()
                self.c += 1
            # now self.c == i
            self.c += 1
            return self.i.next()
        except StopIteration:
            raise IndexError, str(i)

    def __nonzero__(self):
        # truth test peeks one element and stashes it in self._head
        if hasattr(self, "__len__"):
            return len(self) != 0
        elif hasattr(self, "_head"):
            return True
        else:
            try:
                self._head = self.i.next()
            except StopIteration:
                return False
            else:
                return True
+
+iterbetter = IterBetter
+
def safeiter(it, cleanup=None, ignore_errors=True):
    """Makes an iterator safe by ignoring the exceptions occurred during the iteration.

    NOTE(review): the `cleanup` and `ignore_errors` parameters are accepted
    but never used by this implementation.
    """
    def next():
        while True:
            try:
                return it.next()
            except StopIteration:
                raise
            except:
                # log and keep pulling items
                traceback.print_exc()

    it = iter(it)
    while True:
        yield next()
+
def safewrite(filename, content):
    """Writes the content to a temp file and then moves the temp file to
    given filename to avoid overwriting the existing file in case of errors.

    NOTE(review): os.rename over an existing target is atomic on POSIX but
    fails on Windows — verify if Windows support matters here.
    """
    f = file(filename + '.tmp', 'w')
    f.write(content)
    f.close()
    os.rename(f.name, filename)
+
def dictreverse(mapping):
    """
    Returns a new dictionary with keys and values swapped.
    Duplicate values collapse: the last key iterated wins.

    >>> dictreverse({1: 2, 3: 4})
    {2: 1, 4: 3}
    """
    return dict([(value, key) for (key, value) in mapping.iteritems()])
+
def dictfind(dictionary, element):
    """
    Returns a key whose value in `dictionary` is `element`
    or, if none exists, None.

    >>> d = {1:2, 3:4}
    >>> dictfind(d, 4)
    3
    >>> dictfind(d, 5)
    """
    for (key, value) in dictionary.iteritems():
        # NOTE(review): identity (`is`), not equality — works in the doctest
        # because small ints are interned; verify that's the intended contract.
        if element is value:
            return key
+
def dictfindall(dictionary, element):
    """
    Returns the keys whose values in `dictionary` are `element`
    or, if none exists, [].

    >>> d = {1:4, 3:4}
    >>> dictfindall(d, 4)
    [1, 3]
    >>> dictfindall(d, 5)
    []
    """
    res = []
    for (key, value) in dictionary.iteritems():
        # NOTE(review): identity comparison, as in dictfind — confirm intended.
        if element is value:
            res.append(key)
    return res
+
def dictincr(dictionary, element):
    """
    Increments `element` in `dictionary`,
    setting it to one if it doesn't exist.
    Returns the new count.

    >>> d = {1:2, 3:4}
    >>> dictincr(d, 1)
    3
    >>> d[1]
    3
    >>> dictincr(d, 5)
    1
    >>> d[5]
    1
    """
    new_count = dictionary.get(element, 0) + 1
    dictionary[element] = new_count
    return new_count
+
def dictadd(*dicts):
    """
    Returns a dictionary consisting of the keys in the argument dictionaries.
    If they share a key, the value from the last argument is used.

    >>> dictadd({1: 0, 2: 0}, {2: 1, 3: 1})
    {1: 0, 2: 1, 3: 1}
    """
    merged = {}
    for mapping in dicts:
        merged.update(mapping)
    return merged
+
def requeue(queue, index=-1):
    """Returns the element at index after moving it to the beginning of the queue.

    >>> x = [1, 2, 3, 4]
    >>> requeue(x)
    4
    >>> x
    [4, 1, 2, 3]
    """
    item = queue[index]
    del queue[index]
    queue.insert(0, item)
    return item
+
def restack(stack, index=0):
    """Returns the element at index after moving it to the top of stack.

    >>> x = [1, 2, 3, 4]
    >>> restack(x)
    1
    >>> x
    [2, 3, 4, 1]
    """
    item = stack[index]
    del stack[index]
    stack.append(item)
    return item
+
def listget(lst, ind, default=None):
    """
    Returns `lst[ind]` if it exists, `default` otherwise.

    >>> listget(['a'], 0)
    'a'
    >>> listget(['a'], 1)
    >>> listget(['a'], 1, 'b')
    'b'
    >>> listget(['a'], -2, 'b')
    'b'
    """
    # EAFP fix: the old `len(lst)-1 < ind` bounds check missed out-of-range
    # *negative* indices (e.g. listget(['a'], -2) raised IndexError instead
    # of returning `default`).
    try:
        return lst[ind]
    except IndexError:
        return default
+
def intget(integer, default=None):
    """
    Returns `integer` as an int or `default` if it can't.

    >>> intget('3')
    3
    >>> intget('3a')
    >>> intget('3a', 0)
    0
    """
    try:
        result = int(integer)
    except (TypeError, ValueError):
        return default
    return result
+
def datestr(then, now=None):
    """
    Converts a (UTC) datetime object to a nice string representation.

    >>> from datetime import datetime, timedelta
    >>> d = datetime(1970, 5, 1)
    >>> datestr(d, now=d)
    '0 microseconds ago'
    >>> for t, v in {
    ...   timedelta(microseconds=1): '1 microsecond ago',
    ...   timedelta(microseconds=2): '2 microseconds ago',
    ...   -timedelta(microseconds=1): '1 microsecond from now',
    ...   -timedelta(microseconds=2): '2 microseconds from now',
    ...   timedelta(microseconds=2000): '2 milliseconds ago',
    ...   timedelta(seconds=2): '2 seconds ago',
    ...   timedelta(seconds=2*60): '2 minutes ago',
    ...   timedelta(seconds=2*60*60): '2 hours ago',
    ...   timedelta(days=2): '2 days ago',
    ... }.iteritems():
    ...     assert datestr(d, now=d+t) == v
    >>> datestr(datetime(1970, 1, 1), now=d)
    'January 1'
    >>> datestr(datetime(1969, 1, 1), now=d)
    'January 1, 1969'
    >>> datestr(datetime(1970, 6, 1), now=d)
    'June 1, 1970'
    >>> datestr(None)
    ''
    """
    def agohence(n, what, divisor=None):
        # Render `n` units as '<count> <unit>[s] ago|from now'.
        if divisor: n = n // divisor

        out = str(abs(n)) + ' ' + what       # '2 day'
        if abs(n) != 1: out += 's'           # '2 days'
        out += ' '                           # '2 days '
        if n < 0:
            out += 'from now'
        else:
            out += 'ago'
        return out                           # '2 days ago'

    oneday = 24 * 60 * 60

    if not then: return ""
    if not now: now = datetime.datetime.utcnow()
    # mx.DateTime compatibility: coerce to datetime.datetime
    if type(now).__name__ == "DateTime":
        now = datetime.datetime.fromtimestamp(now)
    if type(then).__name__ == "DateTime":
        then = datetime.datetime.fromtimestamp(then)
    elif type(then).__name__ == "date":
        then = datetime.datetime(then.year, then.month, then.day)

    delta = now - then
    deltaseconds = int(delta.days * oneday + delta.seconds + delta.microseconds * 1e-06)
    deltadays = abs(deltaseconds) // oneday
    if deltaseconds < 0: deltadays *= -1 # fix for oddity of floor

    if deltadays:
        # up to 3 days: relative; beyond that: calendar date
        if abs(deltadays) < 4:
            return agohence(deltadays, 'day')

        try:
            out = then.strftime('%B %e') # e.g. 'June 3'
        except ValueError:
            # %e doesn't work on Windows.
            out = then.strftime('%B %d') # e.g. 'June 03'

        # include the year for other years and for future dates
        if then.year != now.year or deltadays < 0:
            out += ', %s' % then.year
        return out

    if int(deltaseconds):
        if abs(deltaseconds) > (60 * 60):
            return agohence(deltaseconds, 'hour', 60 * 60)
        elif abs(deltaseconds) > 60:
            return agohence(deltaseconds, 'minute', 60)
        else:
            return agohence(deltaseconds, 'second')

    deltamicroseconds = delta.microseconds
    if delta.days: deltamicroseconds = int(delta.microseconds - 1e6) # datetime oddity
    if abs(deltamicroseconds) > 1000:
        return agohence(deltamicroseconds, 'millisecond', 1000)

    return agohence(deltamicroseconds, 'microsecond')
+
def numify(string):
    """
    Removes all non-digit characters from `string`.

    >>> numify('800-555-1212')
    '8005551212'
    >>> numify('800.555.1212')
    '8005551212'

    """
    digits = [ch for ch in str(string) if ch.isdigit()]
    return ''.join(digits)
+
def denumify(string, pattern):
    """
    Formats `string` according to `pattern`, where the letter X gets replaced
    by characters from `string`.

    >>> denumify("8005551212", "(XXX) XXX-XXXX")
    '(800) 555-1212'

    """
    pieces = []
    pos = 0
    for ch in pattern:
        if ch == "X":
            pieces.append(string[pos])
            pos += 1
        else:
            pieces.append(ch)
    return ''.join(pieces)
+
def commify(n):
    """
    Add commas to an integer `n`.

    >>> commify(1)
    '1'
    >>> commify(123)
    '123'
    >>> commify(1234)
    '1,234'
    >>> commify(1234567890)
    '1,234,567,890'
    >>> commify(123.0)
    '123.0'
    >>> commify(1234.5)
    '1,234.5'
    >>> commify(1234.56789)
    '1,234.56789'
    >>> commify('%.2f' % 1234.5)
    '1,234.50'
    >>> commify(None)
    >>>

    """
    if n is None: return None
    text = str(n)
    if '.' in text:
        whole, frac = text.split('.')
    else:
        whole, frac = text, None

    # peel off 3-digit groups from the right
    groups = []
    while len(whole) > 3:
        groups.insert(0, whole[-3:])
        whole = whole[:-3]
    groups.insert(0, whole)

    out = ','.join(groups)
    if frac:
        out += '.' + frac
    return out
+
def dateify(datestring):
    """
    Formats a numified `datestring` properly,
    e.g. '20091204153045' -> '2009-12-04 15:30:45'.
    """
    return denumify(datestring, "XXXX-XX-XX XX:XX:XX")
+
+
def nthstr(n):
    """
    Formats an ordinal.
    Doesn't handle negative numbers.

    >>> nthstr(1)
    '1st'
    >>> nthstr(0)
    '0th'
    >>> [nthstr(x) for x in [2, 3, 4, 5, 10, 11, 12, 13, 14, 15]]
    ['2nd', '3rd', '4th', '5th', '10th', '11th', '12th', '13th', '14th', '15th']
    >>> [nthstr(x) for x in [91, 92, 93, 94, 99, 100, 101, 102]]
    ['91st', '92nd', '93rd', '94th', '99th', '100th', '101st', '102nd']
    >>> [nthstr(x) for x in [111, 112, 113, 114, 115]]
    ['111th', '112th', '113th', '114th', '115th']

    """
    assert n >= 0
    # 11th, 12th, 13th are special-cased (not 11st, 12nd, 13rd)
    if 10 < n % 100 < 14:
        return '%sth' % n
    suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(n % 10, 'th')
    return '%s%s' % (n, suffix)
+
def cond(predicate, consequence, alternative=None):
    """
    Function replacement for if-else to use in expressions.

    >>> x = 2
    >>> cond(x % 2 == 0, "even", "odd")
    'even'
    >>> cond(x % 2 == 0, "even", "odd") + '_row'
    'even_row'
    """
    # guard-clause form; kept statement-based for pre-2.5 compatibility
    if not predicate:
        return alternative
    return consequence
+
class CaptureStdout:
    """
    Captures everything `func` prints to stdout and returns it instead.

    >>> def idiot():
    ...     print "foo"
    >>> capturestdout(idiot)()
    'foo\\n'

    **WARNING:** Not threadsafe!
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, *args, **keywords):
        from cStringIO import StringIO
        # Not threadsafe! — swaps the process-global sys.stdout.
        out = StringIO()
        oldstdout = sys.stdout
        sys.stdout = out
        try:
            self.func(*args, **keywords)
        finally:
            # always restore stdout, even if func raises
            sys.stdout = oldstdout
        return out.getvalue()
+
+capturestdout = CaptureStdout
+
class Profile:
    """
    Profiles `func` and returns a tuple containing its output
    and a string with human-readable profiling information.

    >>> import time
    >>> out, inf = profile(time.sleep)(.001)
    >>> out
    >>> inf[:10].strip()
    'took 0.0'
    """
    def __init__(self, func):
        self.func = func
    def __call__(self, *args): ##, **kw): kw unused
        import hotshot, hotshot.stats, os, tempfile ##, time already imported
        # profile into a private temp file
        f, filename = tempfile.mkstemp()
        os.close(f)

        prof = hotshot.Profile(filename)

        stime = time.time()
        result = prof.runcall(self.func, *args)
        stime = time.time() - stime
        prof.close()

        import cStringIO
        out = cStringIO.StringIO()
        stats = hotshot.stats.load(filename)
        stats.stream = out
        stats.strip_dirs()
        stats.sort_stats('time', 'calls')
        stats.print_stats(40)
        stats.print_callers()

        x = '\n\ntook '+ str(stime) + ' seconds\n'
        x += out.getvalue()

        # remove the tempfile
        try:
            os.remove(filename)
        except OSError:
            # bugfix: os.remove raises OSError (not IOError) on failure;
            # the old `except IOError` never suppressed it.
            pass

        return result, x
+
+profile = Profile
+
+
import traceback
# hack for compatibility with Python 2.3: traceback.format_exc was added
# in 2.4, so synthesize it from print_exc when missing.
if not hasattr(traceback, 'format_exc'):
    from cStringIO import StringIO
    def format_exc(limit=None):
        strbuf = StringIO()
        traceback.print_exc(limit, strbuf)
        return strbuf.getvalue()
    traceback.format_exc = format_exc
+
def tryall(context, prefix=None):
    """
    Tries a series of functions and prints their results.
    `context` is a dictionary mapping names to values;
    the value will only be tried if it's callable.

    >>> tryall(dict(j=lambda: True))
    j: True
    ----------------------------------------
    results:
       True: 1

    For example, you might have a file `test/stuff.py`
    with a series of functions testing various things in it.
    At the bottom, have a line:

        if __name__ == "__main__": tryall(globals())

    Then you can run `python test/stuff.py` and get the results of
    all the tests.
    """
    context = context.copy() # vars() would update
    results = {}  # result value -> occurrence count
    for (key, value) in context.iteritems():
        if not hasattr(value, '__call__'):
            continue
        if prefix and not key.startswith(prefix):
            continue
        print key + ':',
        try:
            r = value()
            dictincr(results, r)
            print r
        except:
            # a failing test is tallied under 'ERROR' with its traceback shown
            print 'ERROR'
            dictincr(results, 'ERROR')
            print ' ' + '\n '.join(traceback.format_exc().split('\n'))

    print '-'*40
    print 'results:'
    for (key, value) in results.iteritems():
        print ' '*2, str(key)+':', value
+
class ThreadedDict(threadlocal):
    """
    Thread local storage.

    >>> d = ThreadedDict()
    >>> d.x = 1
    >>> d.x
    1
    >>> import threading
    >>> def f(): d.x = 2
    ...
    >>> t = threading.Thread(target=f)
    >>> t.start()
    >>> t.join()
    >>> d.x
    1
    """
    # all live instances, so clear_all() can wipe them between requests
    _instances = set()

    def __init__(self):
        ThreadedDict._instances.add(self)

    def __del__(self):
        ThreadedDict._instances.remove(self)

    def __hash__(self):
        # identity hash so instances can live in the _instances set
        return id(self)

    def clear_all():
        """Clears all ThreadedDict instances.
        """
        for t in list(ThreadedDict._instances):
            t.clear()
    clear_all = staticmethod(clear_all)

    # Define all these methods to more or less fully emulate dict -- attribute access
    # is built into threading.local.

    def __getitem__(self, key):
        return self.__dict__[key]

    def __setitem__(self, key, value):
        self.__dict__[key] = value

    def __delitem__(self, key):
        del self.__dict__[key]

    def __contains__(self, key):
        return key in self.__dict__

    has_key = __contains__

    def clear(self):
        self.__dict__.clear()

    def copy(self):
        return self.__dict__.copy()

    def get(self, key, default=None):
        return self.__dict__.get(key, default)

    def items(self):
        return self.__dict__.items()

    def iteritems(self):
        return self.__dict__.iteritems()

    def keys(self):
        return self.__dict__.keys()

    def iterkeys(self):
        return self.__dict__.iterkeys()

    iter = iterkeys

    def values(self):
        return self.__dict__.values()

    def itervalues(self):
        return self.__dict__.itervalues()

    def pop(self, key, *args):
        return self.__dict__.pop(key, *args)

    def popitem(self):
        return self.__dict__.popitem()

    def setdefault(self, key, default=None):
        return self.__dict__.setdefault(key, default)

    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def __repr__(self):
        # NOTE(review): upstream web.py formats '<ThreadedDict %r>'; the bare
        # '' here looks like stripped markup (and '' % dict yields ''). TODO
        # confirm against upstream.
        return '' % self.__dict__

    __str__ = __repr__
+
+threadeddict = ThreadedDict
+
def autoassign(self, locals):
    """
    Automatically assigns local variables to `self`.

    >>> self = storage()
    >>> autoassign(self, dict(a=1, b=2))
    >>> self.a, self.b
    (1, 2)

    Generally used in `__init__` methods, as in:

        def __init__(self, foo, bar, baz=1): autoassign(self, locals())
    """
    for (key, value) in locals.iteritems():
        # skip the instance itself
        if key == 'self':
            continue
        setattr(self, key, value)
+
def to36(q):
    """
    Converts an integer to base 36 (a useful scheme for human-sayable IDs).

    >>> to36(35)
    'z'
    >>> to36(119292)
    '2k1o'
    >>> int(to36(939387374), 36)
    939387374
    >>> to36(0)
    '0'
    >>> to36(-393)
    Traceback (most recent call last):
        ...
    ValueError: must supply a positive integer

    """
    if q < 0: raise ValueError, "must supply a positive integer"
    letters = "0123456789abcdefghijklmnopqrstuvwxyz"
    converted = []
    # repeated divmod builds the digits most-significant-first via insert(0, ...)
    while q != 0:
        q, r = divmod(q, 36)
        converted.insert(0, letters[r])
    return "".join(converted) or '0'
+
+
# NOTE(review): this span is corrupted. In upstream web.py it is a regex
# constant plus a `safemarkdown(text)` function; here the regex literal and
# the `def` header appear to have been stripped by a markup sanitizer, and
# the top-level `return` below is a syntax error. TODO: restore this block
# from upstream web.py's utils.py rather than guessing at it here.
r_url = re_compile('(?', text)
text = markdown(text)
return text
+
def sendmail(from_address, to_address, subject, message, headers=None, **kw):
    """
    Sends the email message `message` with mail and envelope headers
    for from `from_address_` to `to_address` with `subject`.
    Additional email headers can be specified with the dictionary
    `headers`.

    Optionally cc, bcc and attachments can be specified as keyword arguments.
    Attachments must be an iterable and each attachment can be either a
    filename or a file object or a dictionary with filename, content and
    optionally content_type keys.

    If `web.config.smtp_server` is set, it will send the message
    to that SMTP server. Otherwise it will look for
    `/usr/sbin/sendmail`, the typical location for the sendmail-style
    binary. To use sendmail from a different path, set `web.config.sendmail_path`.
    """
    attachments = kw.pop("attachments", [])
    mail = _EmailMessage(from_address, to_address, subject, message, headers, **kw)

    # Accept three attachment shapes: dict, file-like, or filename string.
    for a in attachments:
        if isinstance(a, dict):
            mail.attach(a['filename'], a['content'], a.get('content_type'))
        elif hasattr(a, 'read'): # file
            filename = os.path.basename(getattr(a, "name", ""))
            content_type = getattr(a, 'content_type', None)
            mail.attach(filename, a.read(), content_type)
        elif isinstance(a, basestring):
            f = open(a, 'rb')
            content = f.read()
            f.close()
            filename = os.path.basename(a)
            mail.attach(filename, content, None)
        else:
            raise ValueError, "Invalid attachment: %s" % repr(a)

    mail.send()
+
class _EmailMessage:
    """Builds a (possibly multipart) MIME message and delivers it via SMTP,
    Amazon SES, or a local sendmail binary. Helper behind `sendmail()`."""
    def __init__(self, from_address, to_address, subject, message, headers=None, **kw):
        # Normalize any address argument to a list of byte strings.
        def listify(x):
            if not isinstance(x, list):
                return [safestr(x)]
            else:
                return [safestr(a) for a in x]

        subject = safestr(subject)
        message = safestr(message)

        from_address = safestr(from_address)
        to_address = listify(to_address)
        cc = listify(kw.get('cc', []))
        bcc = listify(kw.get('bcc', []))
        recipients = to_address + cc + bcc

        import email.Utils
        # envelope addresses: bare addr-spec only (no display names)
        self.from_address = email.Utils.parseaddr(from_address)[1]
        self.recipients = [email.Utils.parseaddr(r)[1] for r in recipients]

        # caller-supplied `headers` win over the generated ones;
        # Bcc is deliberately never added as a header
        self.headers = dictadd({
            'From': from_address,
            'To': ", ".join(to_address),
            'Subject': subject
        }, headers or {})

        if cc:
            self.headers['Cc'] = ", ".join(cc)

        self.message = self.new_message()
        self.message.add_header("Content-Transfer-Encoding", "7bit")
        self.message.add_header("Content-Disposition", "inline")
        self.message.add_header("MIME-Version", "1.0")
        self.message.set_payload(message, 'utf-8')
        self.multipart = False

    def new_message(self):
        from email.Message import Message
        return Message()

    def attach(self, filename, content, content_type=None):
        # First attachment promotes the body to multipart/mixed.
        if not self.multipart:
            msg = self.new_message()
            msg.add_header("Content-Type", "multipart/mixed")
            msg.attach(self.message)
            self.message = msg
            self.multipart = True

        import mimetypes
        try:
            from email import encoders
        except:
            from email import Encoders as encoders

        # bugfix: MIME type was misspelled "applcation/octet-stream"
        content_type = content_type or mimetypes.guess_type(filename)[0] or "application/octet-stream"

        msg = self.new_message()
        msg.set_payload(content)
        msg.add_header('Content-Type', content_type)
        msg.add_header('Content-Disposition', 'attachment', filename=filename)

        # binary payloads get base64 transfer encoding
        if not content_type.startswith("text/"):
            encoders.encode_base64(msg)

        self.message.attach(msg)

    def prepare_message(self):
        # Flush accumulated headers into the MIME message exactly once.
        for k, v in self.headers.iteritems():
            if k.lower() == "content-type":
                self.message.set_type(v)
            else:
                self.message.add_header(k, v)

        self.headers = {}

    def send(self):
        try:
            import webapi
        except ImportError:
            # standalone use: fall back to an empty config
            webapi = Storage(config=Storage())

        self.prepare_message()
        message_text = self.message.as_string()

        if webapi.config.get('smtp_server'):
            server = webapi.config.get('smtp_server')
            port = webapi.config.get('smtp_port', 0)
            username = webapi.config.get('smtp_username')
            password = webapi.config.get('smtp_password')
            debug_level = webapi.config.get('smtp_debuglevel', None)
            starttls = webapi.config.get('smtp_starttls', False)

            import smtplib
            smtpserver = smtplib.SMTP(server, port)

            if debug_level:
                smtpserver.set_debuglevel(debug_level)

            if starttls:
                smtpserver.ehlo()
                smtpserver.starttls()
                smtpserver.ehlo()

            if username and password:
                smtpserver.login(username, password)

            smtpserver.sendmail(self.from_address, self.recipients, message_text)
            smtpserver.quit()
        elif webapi.config.get('email_engine') == 'aws':
            import boto.ses
            # bugfix: was `web.api.config` (undefined name -> NameError)
            c = boto.ses.SESConnection(
                aws_access_key_id=webapi.config.get('aws_access_key_id'),
                aws_secret_access_key=webapi.config.get('aws_secret_access_key'))
            c.send_raw_email(self.from_address, message_text, self.recipients)
        else:
            sendmail = webapi.config.get('sendmail_path', '/usr/sbin/sendmail')

            # never let an address be parsed as a sendmail option
            assert not self.from_address.startswith('-'), 'security'
            for r in self.recipients:
                assert not r.startswith('-'), 'security'

            cmd = [sendmail, '-f', self.from_address] + self.recipients

            if subprocess:
                p = subprocess.Popen(cmd, stdin=subprocess.PIPE)
                p.stdin.write(message_text)
                p.stdin.close()
                p.wait()
            else:
                i, o = os.popen2(cmd)
                # bugfix: was `i.write(message)` — `message` is not defined
                # in this scope (NameError); the rendered text is what must
                # be written, matching the subprocess branch.
                i.write(message_text)
                i.close()
                o.close()
                del i, o

    def __repr__(self):
        # NOTE(review): literal looks stripped by a sanitizer; upstream
        # returns "<EmailMessage>". TODO confirm before restoring.
        return ""

    def __str__(self):
        return self.message.as_string()
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/webapi.py b/lib/nulib/python/nulib/ext/web/webapi.py
new file mode 100644
index 0000000..b74396f
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/webapi.py
@@ -0,0 +1,546 @@
+"""
+Web API (wrapper around WSGI)
+(from web.py)
+"""
+
+__all__ = [
+ "config",
+ "header", "debug",
+ "input", "data",
+ "setcookie", "cookies",
+ "ctx",
+ "HTTPError",
+
+ # 200, 201, 202, 204
+ "OK", "Created", "Accepted", "NoContent",
+ "ok", "created", "accepted", "nocontent",
+
+ # 301, 302, 303, 304, 307
+ "Redirect", "Found", "SeeOther", "NotModified", "TempRedirect",
+ "redirect", "found", "seeother", "notmodified", "tempredirect",
+
+ # 400, 401, 403, 404, 405, 406, 409, 410, 412, 415, 451
+ "BadRequest", "Unauthorized", "Forbidden", "NotFound", "NoMethod", "NotAcceptable", "Conflict", "Gone", "PreconditionFailed", "UnsupportedMediaType", "UnavailableForLegalReasons",
+ "badrequest", "unauthorized", "forbidden", "notfound", "nomethod", "notacceptable", "conflict", "gone", "preconditionfailed", "unsupportedmediatype", "unavailableforlegalreasons",
+
+ # 500
+ "InternalError",
+ "internalerror",
+]
+
+import sys, cgi, Cookie, pprint, urlparse, urllib
+from utils import storage, storify, threadeddict, dictadd, intget, safestr
+
+config = storage()
+config.__doc__ = """
+A configuration object for various aspects of web.py.
+
+`debug`
+ : when True, enables reloading, disabled template caching and sets internalerror to debugerror.
+"""
+
class HTTPError(Exception):
    """Base class for HTTP status responses raised by handlers.

    Instantiating one records `status` on the request context, emits every
    entry of `headers` via header(), and keeps `data` as the response body.
    """
    def __init__(self, status, headers={}, data=""):
        ctx.status = status
        for name, value in headers.items():
            header(name, value)
        self.data = data
        Exception.__init__(self, status)
+
def _status_code(status, data=None, classname=None, docstring=None):
    """Build and return an HTTPError subclass for `status`.

    `status` is a full status line such as "304 Not Modified"; `data` is the
    default response body (defaults to the reason phrase).
    """
    if data is None:
        data = status.split(" ", 1)[1]
    # BUG FIX: derive the class name whenever the caller did not supply one,
    # even if `data` WAS supplied explicitly. Previously this assignment was
    # only reached when data was None, so `_status_code("200 OK", data="")`
    # called type(None, ...) and crashed at import time.
    if classname is None:
        classname = status.split(" ", 1)[1].replace(' ', '')  # 304 Not Modified -> NotModified
    docstring = docstring or '`%s` status' % status

    def __init__(self, data=data, headers={}):
        HTTPError.__init__(self, status, headers, data)

    # trick to create class dynamically with dynamic docstring.
    return type(classname, (HTTPError, object), {
        '__doc__': docstring,
        '__init__': __init__
    })
+
# Concrete 2xx response classes; the lowercase aliases keep the historical
# web.py spelling (`raise web.ok()`).
ok = OK = _status_code("200 OK", data="")
created = Created = _status_code("201 Created")
accepted = Accepted = _status_code("202 Accepted")
nocontent = NoContent = _status_code("204 No Content")
+
class Redirect(HTTPError):
    """A `301 Moved Permanently` redirect."""
    def __init__(self, url, status='301 Moved Permanently', absolute=False):
        """
        Returns a `status` redirect to the new URL.
        `url` is joined with the base URL so that things like
        `redirect("about")` will work properly.
        """
        # Resolve relative targets against the path currently being served.
        newloc = urlparse.urljoin(ctx.path, url)

        if newloc.startswith('/'):
            # Prefix with the application root; `absolute` selects the real
            # (unrewritten) home over the externally visible one.
            if absolute:
                home = ctx.realhome
            else:
                home = ctx.home
            newloc = home + newloc

        headers = {
            'Content-Type': 'text/html',
            'Location': newloc
        }
        HTTPError.__init__(self, status, headers, "")

redirect = Redirect
+
# Remaining 3xx responses: thin wrappers that only change the status line.
class Found(Redirect):
    """A `302 Found` redirect."""
    def __init__(self, url, absolute=False):
        Redirect.__init__(self, url, '302 Found', absolute=absolute)

found = Found

class SeeOther(Redirect):
    """A `303 See Other` redirect."""
    def __init__(self, url, absolute=False):
        Redirect.__init__(self, url, '303 See Other', absolute=absolute)

seeother = SeeOther

class NotModified(HTTPError):
    """A `304 Not Modified` status."""
    def __init__(self):
        # No body and no extra headers; just the status line.
        HTTPError.__init__(self, "304 Not Modified")

notmodified = NotModified

class TempRedirect(Redirect):
    """A `307 Temporary Redirect` redirect."""
    def __init__(self, url, absolute=False):
        Redirect.__init__(self, url, '307 Temporary Redirect', absolute=absolute)

tempredirect = TempRedirect
+
# 4xx error classes: each carries a class-level default body in `message`,
# overridable per-instance via the constructor argument.
class BadRequest(HTTPError):
    """`400 Bad Request` error."""
    message = "bad request"
    def __init__(self, message=None):
        status = "400 Bad Request"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

badrequest = BadRequest

class Unauthorized(HTTPError):
    """`401 Unauthorized` error."""
    message = "unauthorized"
    def __init__(self, message=None):
        status = "401 Unauthorized"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

unauthorized = Unauthorized

class Forbidden(HTTPError):
    """`403 Forbidden` error."""
    message = "forbidden"
    def __init__(self, message=None):
        status = "403 Forbidden"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

forbidden = Forbidden
+
class _NotFound(HTTPError):
    """`404 Not Found` error."""
    message = "not found"
    def __init__(self, message=None):
        status = '404 Not Found'
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

def NotFound(message=None):
    """Returns HTTPError with '404 Not Found' error from the active application.
    """
    if message:
        return _NotFound(message)
    elif ctx.get('app_stack'):
        # Let the innermost application render its own custom 404 page.
        return ctx.app_stack[-1].notfound()
    else:
        return _NotFound()

notfound = NotFound
+
class NoMethod(HTTPError):
    """A `405 Method Not Allowed` error.

    When `cls` (a handler class) is given, the Allow header only lists the
    HTTP verbs that class actually implements.
    """
    def __init__(self, cls=None):
        status = '405 Method Not Allowed'
        headers = {'Content-Type': 'text/html'}

        allowed = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE']
        if cls:
            allowed = [verb for verb in allowed if hasattr(cls, verb)]
        headers['Allow'] = ', '.join(allowed)

        HTTPError.__init__(self, status, headers, None)

nomethod = NoMethod
+
# More 4xx error classes following the same message/constructor pattern.
class NotAcceptable(HTTPError):
    """`406 Not Acceptable` error."""
    message = "not acceptable"
    def __init__(self, message=None):
        status = "406 Not Acceptable"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

notacceptable = NotAcceptable

class Conflict(HTTPError):
    """`409 Conflict` error."""
    message = "conflict"
    def __init__(self, message=None):
        status = "409 Conflict"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

conflict = Conflict

class Gone(HTTPError):
    """`410 Gone` error."""
    message = "gone"
    def __init__(self, message=None):
        status = '410 Gone'
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

gone = Gone
+
class PreconditionFailed(HTTPError):
    """`412 Precondition Failed` error."""
    message = "precondition failed"
    def __init__(self, message=None):
        status = "412 Precondition Failed"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

preconditionfailed = PreconditionFailed

class UnsupportedMediaType(HTTPError):
    """`415 Unsupported Media Type` error."""
    message = "unsupported media type"
    def __init__(self, message=None):
        status = "415 Unsupported Media Type"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

unsupportedmediatype = UnsupportedMediaType
+
class _UnavailableForLegalReasons(HTTPError):
    """`451 Unavailable For Legal Reasons` error."""
    message="unavailable for legal reasons"
    def __init__(self, message=None):
        status = "451 Unavailable For Legal Reasons"
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

def UnavailableForLegalReasons(message=None):
    """Returns HTTPError with '451 Unavailable For Legal Reasons' error from the active application.
    """
    if message:
        return _UnavailableForLegalReasons(message)
    elif ctx.get('app_stack'):
        return ctx.app_stack[-1].unavailableforlegalreasons()
    else:
        return _UnavailableForLegalReasons()

unavailableforlegalreasons = UnavailableForLegalReasons
+
class _InternalError(HTTPError):
    """`500 Internal Server Error`."""
    message = "internal server error"

    def __init__(self, message=None):
        status = '500 Internal Server Error'
        headers = {'Content-Type': 'text/html'}
        HTTPError.__init__(self, status, headers, message or self.message)

def InternalError(message=None):
    """Returns HTTPError with '500 internal error' error from the active application.
    """
    if message:
        return _InternalError(message)
    elif ctx.get('app_stack'):
        # config.debug typically swaps this for debugerror upstream.
        return ctx.app_stack[-1].internalerror()
    else:
        return _InternalError()

internalerror = InternalError
+
def header(hdr, value, unique=False):
    """
    Adds the header `hdr: value` with the response.

    If `unique` is True and a header with that name already exists,
    it doesn't add a new one.
    """
    hdr, value = safestr(hdr), safestr(value)
    # protection against HTTP response splitting attack
    if '\n' in hdr or '\r' in hdr or '\n' in value or '\r' in value:
        raise ValueError, 'invalid characters in header'

    if unique is True:
        # Header names are case-insensitive, so compare lowercased.
        for h, v in ctx.headers:
            if h.lower() == hdr.lower(): return

    ctx.headers.append((hdr, value))
+
def rawinput(method=None):
    """Returns storage object with GET or POST arguments.

    `method` is "get", "post", "put" or "both" (the default).
    """
    method = method or "both"
    from cStringIO import StringIO

    def dictify(fs):
        # hack to make web.input work with enctype='text/plain.
        if fs.list is None:
            fs.list = []

        return dict([(k, fs[k]) for k in fs.keys()])

    e = ctx.env.copy()
    # a holds body (POST/PUT) arguments, b holds query-string arguments.
    a = b = {}

    if method.lower() in ['both', 'post', 'put']:
        if e['REQUEST_METHOD'] in ['POST', 'PUT']:
            if e.get('CONTENT_TYPE', '').lower().startswith('multipart/'):
                # since wsgi.input is directly passed to cgi.FieldStorage,
                # it can not be called multiple times. Saving the FieldStorage
                # object in ctx to allow calling web.input multiple times.
                a = ctx.get('_fieldstorage')
                if not a:
                    fp = e['wsgi.input']
                    a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
                    ctx._fieldstorage = a
            else:
                # Non-multipart bodies go through data(), which caches the
                # raw body in ctx so it can be re-parsed on later calls.
                fp = StringIO(data())
                a = cgi.FieldStorage(fp=fp, environ=e, keep_blank_values=1)
            a = dictify(a)

    if method.lower() in ['both', 'get']:
        e['REQUEST_METHOD'] = 'GET'
        b = dictify(cgi.FieldStorage(environ=e, keep_blank_values=1))

    def process_fieldstorage(fs):
        # Collapse plain fields to their string value; keep file uploads as
        # FieldStorage objects so .filename/.file remain available.
        if isinstance(fs, list):
            return [process_fieldstorage(x) for x in fs]
        elif fs.filename is None:
            return fs.value
        else:
            return fs

    # On key collision, body arguments (a) override query-string ones (b).
    return storage([(k, process_fieldstorage(v)) for k, v in dictadd(b, a).items()])
+
def input(*requireds, **defaults):
    """
    Returns a `storage` object with the GET and POST arguments.
    See `storify` for how `requireds` and `defaults` work.
    """
    # The reserved `_method` default restricts parsing to "get"/"post".
    _method = defaults.pop('_method', 'both')
    out = rawinput(_method)
    try:
        defaults.setdefault('_unicode', True) # force unicode conversion by default.
        return storify(out, *requireds, **defaults)
    except KeyError:
        # A required argument was missing: answer 400 Bad Request.
        raise badrequest()
+
def data():
    """Returns the raw data sent with the request, cached in ctx.data."""
    if 'data' not in ctx:
        # Read exactly CONTENT_LENGTH bytes (0 when absent or invalid).
        cl = intget(ctx.env.get('CONTENT_LENGTH'), 0)
        ctx.data = ctx.env['wsgi.input'].read(cl)
    return ctx.data
+
def setcookie(name, value, expires='', domain=None,
              secure=False, httponly=False, path=None):
    """Sets a cookie by emitting a Set-Cookie response header."""
    morsel = Cookie.Morsel()
    name, value = safestr(name), safestr(value)
    morsel.set(name, value, urllib.quote(value))
    # A negative `expires` means "delete this cookie": push it far into the
    # past. NOTE(review): with the default expires='' this str/int comparison
    # is python-2-only behavior -- confirm before porting.
    if expires < 0:
        expires = -1000000000
    morsel['expires'] = expires
    morsel['path'] = path or ctx.homepath+'/'
    if domain:
        morsel['domain'] = domain
    if secure:
        morsel['secure'] = secure
    value = morsel.OutputString()
    if httponly:
        # python 2's Cookie.Morsel has no httponly support; append manually.
        value += '; httponly'
    header('Set-Cookie', value)
+
def decode_cookie(value):
    r"""Safely decodes a cookie value to unicode.

    Tries us-ascii, utf-8 and iso8859 encodings, in that order.

    >>> decode_cookie('')
    u''
    >>> decode_cookie('asdf')
    u'asdf'
    >>> decode_cookie('foo \xC3\xA9 bar')
    u'foo \xe9 bar'
    >>> decode_cookie('foo \xE9 bar')
    u'foo \xe9 bar'
    """
    # Try the strict encodings first, falling through on decode failure.
    for encoding in ('us-ascii', 'utf-8'):
        try:
            return unicode(value, encoding)
        except UnicodeError:
            pass
    # Last resort: permissive latin-1 decode, dropping undecodable bytes.
    return unicode(value, 'iso8859', 'ignore')
+
def parse_cookies(http_cookie):
    r"""Parse a HTTP_COOKIE header and return dict of cookie names and decoded values.

    >>> sorted(parse_cookies('').items())
    []
    >>> sorted(parse_cookies('a=1').items())
    [('a', '1')]
    >>> sorted(parse_cookies('a=1%202').items())
    [('a', '1 2')]
    >>> sorted(parse_cookies('a=Z%C3%A9Z').items())
    [('a', 'Z\xc3\xa9Z')]
    >>> sorted(parse_cookies('a=1; b=2; c=3').items())
    [('a', '1'), ('b', '2'), ('c', '3')]
    >>> sorted(parse_cookies('a=1; b=w("x")|y=z; c=3').items())
    [('a', '1'), ('b', 'w('), ('c', '3')]
    >>> sorted(parse_cookies('a=1; b=w(%22x%22)|y=z; c=3').items())
    [('a', '1'), ('b', 'w("x")|y=z'), ('c', '3')]

    >>> sorted(parse_cookies('keebler=E=mc2').items())
    [('keebler', 'E=mc2')]
    >>> sorted(parse_cookies(r'keebler="E=mc2; L=\"Loves\"; fudge=\012;"').items())
    [('keebler', 'E=mc2; L="Loves"; fudge=\n;')]
    """
    if '"' in http_cookie:
        # HTTP_COOKIE has quotes in it, use slow but correct cookie parsing
        cookie = Cookie.SimpleCookie()
        try:
            cookie.load(http_cookie)
        except Cookie.CookieError:
            # If HTTP_COOKIE header is malformed, try at least to load the cookies we can by
            # first splitting on ';' and loading each attr=value pair separately
            cookie = Cookie.SimpleCookie()
            for attr_value in http_cookie.split(';'):
                try:
                    cookie.load(attr_value)
                except Cookie.CookieError:
                    # Skip unparseable pairs rather than failing the request.
                    pass
        cookies = dict([(k, urllib.unquote(v.value)) for k, v in cookie.iteritems()])
    else:
        # HTTP_COOKIE doesn't have quotes, use fast cookie parsing
        cookies = {}
        for key_value in http_cookie.split(';'):
            key_value = key_value.split('=', 1)
            if len(key_value) == 2:
                key, value = key_value
                cookies[key.strip()] = urllib.unquote(value.strip())
    return cookies
+
def cookies(*requireds, **defaults):
    r"""Returns a `storage` object with all the request cookies in it.

    See `storify` for how `requireds` and `defaults` work.

    This is forgiving on bad HTTP_COOKIE input, it tries to parse at least
    the cookies it can.

    The values are converted to unicode if _unicode=True is passed.
    """
    # If _unicode=True is specified, use decode_cookie to convert cookie value to unicode
    if defaults.get("_unicode") is True:
        defaults['_unicode'] = decode_cookie

    # parse cookie string and cache the result for next time.
    if '_parsed_cookies' not in ctx:
        http_cookie = ctx.env.get("HTTP_COOKIE", "")
        ctx._parsed_cookies = parse_cookies(http_cookie)

    try:
        return storify(ctx._parsed_cookies, *requireds, **defaults)
    except KeyError:
        # NOTE(review): badrequest() is instantiated (recording the 400
        # status/headers on ctx as a side effect) but not raised; the
        # StopIteration below is what actually propagates. Looks deliberate,
        # but worth confirming against the framework's dispatch loop.
        badrequest()
        raise StopIteration
+
def debug(*args):
    """
    Prints a prettyprinted version of `args` to stderr.
    """
    try:
        out = ctx.environ['wsgi.errors']
    except:
        # No active request context: fall back to process stderr.
        out = sys.stderr
    for arg in args:
        print >> out, pprint.pformat(arg)
    return ''
+
def _debugwrite(x):
    # Raw-write variant of debug(); exposed below as debug.write.
    try:
        out = ctx.environ['wsgi.errors']
    except:
        out = sys.stderr
    out.write(x)
debug.write = _debugwrite
+
+ctx = context = threadeddict()
+
+ctx.__doc__ = """
+A `storage` object containing various information about the request:
+
+`environ` (aka `env`)
+ : A dictionary containing the standard WSGI environment variables.
+
+`host`
+ : The domain (`Host` header) requested by the user.
+
+`home`
+ : The base path for the application.
+
+`ip`
+ : The IP address of the requester.
+
+`method`
+ : The HTTP method used.
+
+`path`
+ : The path request.
+
+`query`
+ : If there are no query arguments, the empty string. Otherwise, a `?` followed
+ by the query string.
+
+`fullpath`
+ : The full path requested, including query arguments (`== path + query`).
+
+### Response Data
+
+`status` (default: "200 OK")
+ : The status code to be used in the response.
+
+`headers`
+ : A list of 2-tuples to be used in the response.
+
+`output`
+ : A string to be used as the response.
+"""
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
diff --git a/lib/nulib/python/nulib/ext/web/webopenid.py b/lib/nulib/python/nulib/ext/web/webopenid.py
new file mode 100644
index 0000000..b482216
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/webopenid.py
@@ -0,0 +1,115 @@
+"""openid.py: an openid library for web.py
+
+Notes:
+
+ - This will create a file called .openid_secret_key in the
+ current directory with your secret key in it. If someone
+ has access to this file they can log in as any user. And
+ if the app can't find this file for any reason (e.g. you
+ moved the app somewhere else) then each currently logged
+ in user will get logged out.
+
+ - State must be maintained through the entire auth process
+ -- this means that if you have multiple web.py processes
+ serving one set of URLs or if you restart your app often
+ then log ins will fail. You have to replace sessions and
+ store for things to work.
+
+ - We set cookies starting with "openid_".
+
+"""
+
import hashlib
import hmac
import os
import random

import __init__ as web
import openid.consumer.consumer
import openid.store.memstore
+
+sessions = {}
+store = openid.store.memstore.MemoryStore()
+
+def _secret():
+ try:
+ secret = file('.openid_secret_key').read()
+ except IOError:
+ # file doesn't exist
+ secret = os.urandom(20)
+ file('.openid_secret_key', 'w').write(secret)
+ return secret
+
def _hmac(identity_url):
    """HMAC-sign `identity_url` with the site secret; returns a hex digest."""
    # Spell out the digest: python 2's hmac defaulted to MD5 implicitly, and
    # modern hmac requires an explicit digestmod. Same output either way.
    return hmac.new(_secret(), identity_url, hashlib.md5).hexdigest()
+
def _random_session():
    """Return a new session key (stringified random float) unused in `sessions`."""
    n = str(random.random())
    # BUG FIX: compare the *string* form against the keys actually stored in
    # `sessions` -- the original compared the raw float against str keys, so
    # the uniqueness check could never match.
    while n in sessions:
        n = str(random.random())
    return n
+
def status():
    """Return the verified identity URL of the logged-in user, or None.

    The openid_identity_hash cookie holds "hmac,identity_url"; the user is
    considered logged in only when the hmac matches our secret.
    NOTE(review): the plain == comparison of MACs is not constant-time.
    """
    oid_hash = web.cookies().get('openid_identity_hash', '').split(',', 1)
    if len(oid_hash) > 1:
        # Re-bind: first element is the mac, second the identity URL.
        oid_hash, identity_url = oid_hash
        if oid_hash == _hmac(identity_url):
            return identity_url
    return None
+
def form(openid_loc):
    """Render the login/logout form snippet for the current user.

    NOTE(review): both templates are empty triple-quoted strings yet are
    %-formatted with arguments; 'x' % (a, b) with no placeholders raises
    TypeError at runtime. The HTML markup was most likely stripped from this
    copy -- restore it from upstream webopenid.py.
    """
    oid = status()
    if oid:
        return '''
        ''' % (openid_loc, oid, web.ctx.fullpath)
    else:
        return '''
        ''' % (openid_loc, web.ctx.fullpath)
+
def logout():
    """Log the user out by expiring the identity cookie (expires<0 deletes it)."""
    web.setcookie('openid_identity_hash', '', expires=-1)
+
class host:
    """URL handler driving the OpenID consumer handshake."""

    def POST(self):
        # unlike the usual scheme of things, the POST is actually called
        # first here
        i = web.input(return_to='/')
        if i.get('action') == 'logout':
            logout()
            return web.redirect(i.return_to)

        # From here on 'openid' is a required argument.
        i = web.input('openid', return_to='/')

        # Stash consumer state under a fresh session key.
        n = _random_session()
        sessions[n] = {'webpy_return_to': i.return_to}

        # Begin the handshake and bounce the user to their provider.
        c = openid.consumer.consumer.Consumer(sessions[n], store)
        a = c.begin(i.openid)
        f = a.redirectURL(web.ctx.home, web.ctx.home + web.ctx.fullpath)

        web.setcookie('openid_session_id', n)
        return web.redirect(f)

    def GET(self):
        # The provider redirected back: recover and clear the session cookie.
        n = web.cookies('openid_session_id').openid_session_id
        web.setcookie('openid_session_id', '', expires=-1)
        return_to = sessions[n]['webpy_return_to']

        c = openid.consumer.consumer.Consumer(sessions[n], store)
        a = c.complete(web.input(), web.ctx.home + web.ctx.fullpath)

        if a.status.lower() == 'success':
            # Mark the user logged in by signing their identity URL.
            web.setcookie('openid_identity_hash', _hmac(a.identity_url) + ',' + a.identity_url)

        del sessions[n]
        return web.redirect(return_to)
diff --git a/lib/nulib/python/nulib/ext/web/wsgi.py b/lib/nulib/python/nulib/ext/web/wsgi.py
new file mode 100644
index 0000000..bc7c4bc
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/wsgi.py
@@ -0,0 +1,75 @@
+"""
+WSGI Utilities
+(from web.py)
+"""
+
+import os, sys
+
+import http
+import webapi as web
+from utils import listget, intget
+from net import validaddr, validip
+import httpserver
+
def runfcgi(func, addr=('localhost', 8000)):
    """Runs a WSGI function as a FastCGI server.

    `addr` is a (host, port) tuple; runwsgi() passes None when the web
    server already provides the socket (classic CGI/FastCGI spawn).
    """
    from ..flup.server import fcgi as flups
    return flups.WSGIServer(func, multiplexed=True, bindAddress=addr, debug=False).run()
+
def runscgi(func, addr=('localhost', 4000)):
    """Runs a WSGI function as an SCGI server on `addr`."""
    from ..flup.server import scgi as flups
    return flups.WSGIServer(func, bindAddress=addr, debug=False).run()
+
def runwsgi(func):
    """
    Runs a WSGI-compatible `func` using FCGI, SCGI, or a simple web server,
    as appropriate based on context and `sys.argv`.
    """
    # dict.has_key() is deprecated (and removed in python 3); the `in`
    # operator is the exact equivalent.
    if 'SERVER_SOFTWARE' in os.environ:  # cgi
        os.environ['FCGI_FORCE_CGI'] = 'Y'

    if ('PHP_FCGI_CHILDREN' in os.environ  # lighttpd fastcgi
            or 'SERVER_SOFTWARE' in os.environ):
        return runfcgi(func, None)

    if 'fcgi' in sys.argv or 'fastcgi' in sys.argv:
        args = sys.argv[1:]
        if 'fastcgi' in args: args.remove('fastcgi')
        elif 'fcgi' in args: args.remove('fcgi')
        if args:
            return runfcgi(func, validaddr(args[0]))
        else:
            return runfcgi(func, None)

    if 'scgi' in sys.argv:
        args = sys.argv[1:]
        args.remove('scgi')
        if args:
            return runscgi(func, validaddr(args[0]))
        else:
            return runscgi(func)

    # Default: builtin dev server; address from argv[1] or $PORT (Heroku).
    server_addr = validip(listget(sys.argv, 1, ''))
    if 'PORT' in os.environ:  # e.g. Heroku
        server_addr = ('0.0.0.0', intget(os.environ['PORT']))

    return httpserver.runsimple(func, server_addr)
+
+def _is_dev_mode():
+ # Some embedded python interpreters won't have sys.arv
+ # For details, see https://github.com/webpy/webpy/issues/87
+ argv = getattr(sys, "argv", [])
+
+ # quick hack to check if the program is running in dev mode.
+ if os.environ.has_key('SERVER_SOFTWARE') \
+ or os.environ.has_key('PHP_FCGI_CHILDREN') \
+ or 'fcgi' in argv or 'fastcgi' in argv \
+ or 'mod_wsgi' in argv:
+ return False
+ return True
+
+# When running the builtin-server, enable debug mode if not already set.
+web.config.setdefault('debug', _is_dev_mode())
diff --git a/lib/nulib/python/nulib/ext/web/wsgiserver/__init__.py b/lib/nulib/python/nulib/ext/web/wsgiserver/__init__.py
new file mode 100644
index 0000000..55d1dd9
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/wsgiserver/__init__.py
@@ -0,0 +1,2219 @@
+"""A high-speed, production ready, thread pooled, generic HTTP server.
+
+Simplest example on how to use this module directly
+(without using CherryPy's application machinery)::
+
+ from cherrypy import wsgiserver
+
+ def my_crazy_app(environ, start_response):
+ status = '200 OK'
+ response_headers = [('Content-type','text/plain')]
+ start_response(status, response_headers)
+ return ['Hello world!']
+
+ server = wsgiserver.CherryPyWSGIServer(
+ ('0.0.0.0', 8070), my_crazy_app,
+ server_name='www.cherrypy.example')
+ server.start()
+
+The CherryPy WSGI server can serve as many WSGI applications
+as you want in one instance by using a WSGIPathInfoDispatcher::
+
+ d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
+ server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
+
+Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
+
+This won't call the CherryPy engine (application side) at all, only the
+HTTP server, which is independent from the rest of CherryPy. Don't
+let the name "CherryPyWSGIServer" throw you; the name merely reflects
+its origin, not its coupling.
+
+For those of you wanting to understand internals of this module, here's the
+basic call flow. The server's listening thread runs a very tight loop,
+sticking incoming connections onto a Queue::
+
+ server = CherryPyWSGIServer(...)
+ server.start()
+ while True:
+ tick()
+ # This blocks until a request comes in:
+ child = socket.accept()
+ conn = HTTPConnection(child, ...)
+ server.requests.put(conn)
+
+Worker threads are kept in a pool and poll the Queue, popping off and then
+handling each connection in turn. Each connection can consist of an arbitrary
+number of requests and their responses, so we run a nested loop::
+
+ while True:
+ conn = server.requests.get()
+ conn.communicate()
+ -> while True:
+ req = HTTPRequest(...)
+ req.parse_request()
+ -> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
+ req.rfile.readline()
+ read_headers(req.rfile, req.inheaders)
+ req.respond()
+ -> response = app(...)
+ try:
+ for chunk in response:
+ if chunk:
+ req.write(chunk)
+ finally:
+ if hasattr(response, "close"):
+ response.close()
+ if req.close_connection:
+ return
+"""
+
+CRLF = '\r\n'
+import os
+import Queue
+import re
+quoted_slash = re.compile("(?i)%2F")
+import rfc822
+import socket
+import sys
+if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
+ socket.IPPROTO_IPV6 = 41
+try:
+ import cStringIO as StringIO
+except ImportError:
+ import StringIO
+DEFAULT_BUFFER_SIZE = -1
+
+_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
+
+import threading
+import time
+import traceback
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        frames = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(frames)
    finally:
        # Break the reference cycle with the traceback object.
        exc_type = exc_value = exc_tb = None
+
+
+from urllib import unquote
+from urlparse import urlparse
+import warnings
+
+import errno
+
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    available = dir(errno)
    nums = [getattr(errno, name) for name in errnames if name in available]
    # de-dupe while keeping the dict-keys return shape of the original
    return dict.fromkeys(nums).keys()
+
+socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")
+
+socket_errors_to_ignore = plat_specific_errors(
+ "EPIPE",
+ "EBADF", "WSAEBADF",
+ "ENOTSOCK", "WSAENOTSOCK",
+ "ETIMEDOUT", "WSAETIMEDOUT",
+ "ECONNREFUSED", "WSAECONNREFUSED",
+ "ECONNRESET", "WSAECONNRESET",
+ "ECONNABORTED", "WSAECONNABORTED",
+ "ENETRESET", "WSAENETRESET",
+ "EHOSTDOWN", "EHOSTUNREACH",
+ )
+socket_errors_to_ignore.append("timed out")
+socket_errors_to_ignore.append("The read operation timed out")
+
+socket_errors_nonblocking = plat_specific_errors(
+ 'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')
+
+comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
+ 'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
+ 'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
+ 'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
+ 'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
+ 'WWW-Authenticate']
+
+
+import logging
+if not hasattr(logging, 'statistics'): logging.statistics = {}
+
+
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        if line[0] in ' \t':
            # It's a continuation line: `k` and `hname` deliberately keep
            # their values from the previous iteration.
            v = line.strip()
        else:
            try:
                k, v = line.split(":", 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        # NOTE(review): for repeated headers that are NOT comma-foldable
        # (and for continuation lines) this overwrites the previous value.
        hdict[hname] = v

    return hdict
+
+
class MaxSizeExceeded(Exception):
    # Raised by SizeCheckWrapper/ChunkedRFile when a configured byte limit
    # is exceeded while reading a request.
    pass
+
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        self.maxlen = maxlen        # byte budget; falsy means unlimited
        self.bytes_read = 0

    def _check_length(self):
        # Called after every read; raises once the budget is blown.
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        data = self.rfile.read(size)
        self.bytes_read += len(data)
        self._check_length()
        return data

    def readline(self, size=None):
        if size is not None:
            data = self.rfile.readline(size)
            self.bytes_read += len(data)
            self._check_length()
            return data

        # User didn't specify a size ...
        # We read the line in chunks to make sure it's not a 100MB line !
        res = []
        while True:
            data = self.rfile.readline(256)
            self.bytes_read += len(data)
            self._check_length()
            res.append(data)
            # See http://www.cherrypy.org/ticket/421
            if len(data) < 256 or data[-1:] == "\n":
                return ''.join(res)

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        # python 2 iterator protocol.
        data = self.rfile.next()
        self.bytes_read += len(data)
        self._check_length()
        return data
+
+
class KnownLengthRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    `content_length` bounds read()/readline(); `remaining` tracks how many
    bytes may still be handed out.
    """

    def __init__(self, rfile, content_length):
        self.rfile = rfile
        self.remaining = content_length

    def read(self, size=None):
        """Read at most `size` bytes, never more than `remaining`."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.read(size)
        self.remaining -= len(data)
        return data

    def readline(self, size=None):
        """Read one line, capped at `size` and at `remaining`."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.readline(size)
        self.remaining -= len(data)
        return data

    def readlines(self, sizehint=0):
        # Shamelessly stolen from StringIO.
        # NOTE: sizehint is also passed through as the per-line cap, so the
        # default sizehint=0 reads nothing -- longstanding upstream quirk.
        total = 0
        lines = []
        line = self.readline(sizehint)
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline(sizehint)
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # NOTE(review): iteration reads straight from the wrapped file and
        # can overrun `remaining` (it only decrements afterwards).
        data = next(self.rfile)
        self.remaining -= len(data)
        return data

    # BUG FIX: this module otherwise targets python 2, whose iterator
    # protocol calls .next(); without this alias `for line in rfile` raised
    # TypeError there (sibling SizeCheckWrapper defines next()).
    next = __next__
+
+
+class ChunkedRFile(object):
+ """Wraps a file-like object, returning an empty string when exhausted.
+
+ This class is intended to provide a conforming wsgi.input value for
+ request entities that have been encoded with the 'chunked' transfer
+ encoding.
+ """
+
+ def __init__(self, rfile, maxlen, bufsize=8192):
+ self.rfile = rfile
+ self.maxlen = maxlen
+ self.bytes_read = 0
+ self.buffer = ''
+ self.bufsize = bufsize
+ self.closed = False
+
+ def _fetch(self):
+ if self.closed:
+ return
+
+ line = self.rfile.readline()
+ self.bytes_read += len(line)
+
+ if self.maxlen and self.bytes_read > self.maxlen:
+ raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)
+
+ line = line.strip().split(";", 1)
+
+ try:
+ chunk_size = line.pop(0)
+ chunk_size = int(chunk_size, 16)
+ except ValueError:
+ raise ValueError("Bad chunked transfer size: " + repr(chunk_size))
+
+ if chunk_size <= 0:
+ self.closed = True
+ return
+
+## if line: chunk_extension = line[0]
+
+ if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
+ raise IOError("Request Entity Too Large")
+
+ chunk = self.rfile.read(chunk_size)
+ self.bytes_read += len(chunk)
+ self.buffer += chunk
+
+ crlf = self.rfile.read(2)
+ if crlf != CRLF:
+ raise ValueError(
+ "Bad chunked transfer coding (expected '\\r\\n', "
+ "got " + repr(crlf) + ")")
+
+ def read(self, size=None):
+ data = ''
+ while True:
+ if size and len(data) >= size:
+ return data
+
+ if not self.buffer:
+ self._fetch()
+ if not self.buffer:
+ # EOF
+ return data
+
+ if size:
+ remaining = size - len(data)
+ data += self.buffer[:remaining]
+ self.buffer = self.buffer[remaining:]
+ else:
+ data += self.buffer
+
+ def readline(self, size=None):
+ data = ''
+ while True:
+ if size and len(data) >= size:
+ return data
+
+ if not self.buffer:
+ self._fetch()
+ if not self.buffer:
+ # EOF
+ return data
+
+ newline_pos = self.buffer.find('\n')
+ if size:
+ if newline_pos == -1:
+ remaining = size - len(data)
+ data += self.buffer[:remaining]
+ self.buffer = self.buffer[remaining:]
+ else:
+ remaining = min(size - len(data), newline_pos)
+ data += self.buffer[:remaining]
+ self.buffer = self.buffer[remaining:]
+ else:
+ if newline_pos == -1:
+ data += self.buffer
+ else:
+ data += self.buffer[:newline_pos]
+ self.buffer = self.buffer[newline_pos:]
+
+ def readlines(self, sizehint=0):
+ # Shamelessly stolen from StringIO
+ total = 0
+ lines = []
+ line = self.readline(sizehint)
+ while line:
+ lines.append(line)
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline(sizehint)
+ return lines
+
    def read_trailer_lines(self):
        """Yield each CRLF-terminated trailer line after the final chunk.

        Generator. May only be used once the body has been fully read
        (self.closed is True); stops at the blank CRLF line that ends the
        trailer section. Raises ValueError on premature EOF or a line
        without a CRLF terminator, IOError if maxlen is exceeded.
        """
        if not self.closed:
            raise ValueError(
                "Cannot read trailers until the request body has been read.")

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            self.bytes_read += len(line)
            if self.maxlen and self.bytes_read > self.maxlen:
                raise IOError("Request Entity Too Large")

            if line == CRLF:
                # Normal end of headers
                break
            if not line.endswith(CRLF):
                raise ValueError("HTTP requires CRLF terminators")

            yield line
+
    def close(self):
        """Close the underlying rfile.

        NOTE(review): this does not set self.closed -- that flag only
        records end-of-body in _fetch; confirm no caller relies on it.
        """
        self.rfile.close()
+
+ def __iter__(self):
+ # Shamelessly stolen from StringIO
+ total = 0
+ line = self.readline(sizehint)
+ while line:
+ yield line
+ total += len(line)
+ if 0 < sizehint <= total:
+ break
+ line = self.readline(sizehint)
+
+
class HTTPRequest(object):
    """An HTTP Request (and response).

    A single HTTP connection may consist of multiple request/response pairs.
    """

    server = None
    """The HTTPServer object which is receiving this request."""

    conn = None
    """The HTTPConnection object on which this request connected."""

    inheaders = {}
    """A dict of request headers."""

    outheaders = []
    """A list of header tuples to write in the response."""

    ready = False
    """When True, the request has been parsed and is ready to begin generating
    the response. When False, signals the calling Connection that the response
    should not be generated and the connection should close."""

    close_connection = False
    """Signals the calling Connection that the request should close. This does
    not imply an error! The client and/or server may each request that the
    connection be closed."""

    chunked_write = False
    """If True, output will be encoded with the "chunked" transfer-coding.

    This value is set automatically inside send_headers."""

    def __init__(self, server, conn):
        """Bind this request to its server and connection and reset
        per-request state."""
        self.server= server
        self.conn = conn

        self.ready = False
        self.started_request = False
        self.scheme = "http"
        if self.server.ssl_adapter is not None:
            self.scheme = "https"
        # Use the lowest-common protocol in case read_request_line errors.
        self.response_protocol = 'HTTP/1.0'
        self.inheaders = {}

        self.status = ""
        self.outheaders = []
        self.sent_headers = False
        self.close_connection = self.__class__.close_connection
        self.chunked_read = False
        self.chunked_write = self.__class__.chunked_write

    def parse_request(self):
        """Parse the next HTTP request start-line and message-headers."""
        # Cap header reads; SizeCheckWrapper raises MaxSizeExceeded when
        # the limit is crossed.
        self.rfile = SizeCheckWrapper(self.conn.rfile,
                                      self.server.max_request_header_size)
        try:
            self.read_request_line()
        except MaxSizeExceeded:
            self.simple_response("414 Request-URI Too Long",
                "The Request-URI sent with the request exceeds the maximum "
                "allowed bytes.")
            return

        try:
            success = self.read_request_headers()
        except MaxSizeExceeded:
            self.simple_response("413 Request Entity Too Large",
                "The headers sent with the request exceed the maximum "
                "allowed bytes.")
            return
        else:
            if not success:
                return

        self.ready = True

    def read_request_line(self):
        """Read and parse the Request-Line (e.g. "GET / HTTP/1.1").

        Sets self.uri/method/path/qs and negotiates the response protocol.
        On any error, emits a simple response (or forces self.ready False)
        and returns early.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        if not request_line.endswith(CRLF):
            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
            return

        try:
            method, uri, req_protocol = request_line.strip().split(" ", 2)
            # Indexes 5 and 7 of "HTTP/x.y" are the major/minor digits.
            rp = int(req_protocol[5]), int(req_protocol[7])
        except (ValueError, IndexError):
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        if '#' in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            self.scheme = scheme

        qs = ''
        if '?' in path:
            path, qs = path.split('?', 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)

    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error, x:
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True

    def parse_request_uri(self, uri):
        """Parse a Request-URI into (scheme, authority, path).

        Note that Request-URI's must be one of::

            Request-URI    = "*" | absoluteURI | abs_path | authority

        Therefore, a Request-URI which starts with a double forward-slash
        cannot be a "net_path"::

            net_path      = "//" authority [ abs_path ]

        Instead, it must be interpreted as an "abs_path" with an empty first
        path segment::

            abs_path      = "/"  path_segments
            path_segments = segment *( "/" segment )
            segment       = *pchar *( ";" param )
            param         = *pchar
        """
        if uri == "*":
            return None, None, uri

        i = uri.find('://')
        if i > 0 and '?' not in uri[:i]:
            # An absoluteURI.
            # If there's a scheme (and it must be http or https), then:
            # http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
            scheme, remainder = uri[:i].lower(), uri[i + 3:]
            # NOTE(review): raises ValueError for an absoluteURI with no
            # path component (e.g. "http://host") -- confirm callers expect
            # that to surface as a 500.
            authority, path = remainder.split("/", 1)
            return scheme, authority, path

        if uri.startswith('/'):
            # An abs_path.
            return None, None, uri
        else:
            # An authority.
            return None, uri, None

    def respond(self):
        """Call the gateway and write its iterable output."""
        mrbs = self.server.max_request_body_size
        if self.chunked_read:
            self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
        else:
            cl = int(self.inheaders.get("Content-Length", 0))
            if mrbs and mrbs < cl:
                if not self.sent_headers:
                    self.simple_response("413 Request Entity Too Large",
                        "The entity sent with the request exceeds the maximum "
                        "allowed bytes.")
                return
            self.rfile = KnownLengthRFile(self.conn.rfile, cl)

        self.server.gateway(self).respond()

        if (self.ready and not self.sent_headers):
            self.sent_headers = True
            self.send_headers()
        if self.chunked_write:
            # Terminating zero-length chunk for the chunked response body.
            self.conn.wfile.sendall("0\r\n\r\n")

    def simple_response(self, status, msg=""):
        """Write a simple response back to the client."""
        status = str(status)
        buf = [self.server.protocol + " " +
               status + CRLF,
               "Content-Length: %s\r\n" % len(msg),
               "Content-Type: text/plain\r\n"]

        if status[:3] in ("413", "414"):
            # Request Entity Too Large / Request-URI Too Long
            self.close_connection = True
            if self.response_protocol == 'HTTP/1.1':
                # This will not be true for 414, since read_request_line
                # usually raises 414 before reading the whole line, and we
                # therefore cannot know the proper response_protocol.
                buf.append("Connection: close\r\n")
            else:
                # HTTP/1.0 had no 413/414 status nor Connection header.
                # Emit 400 instead and trust the message body is enough.
                status = "400 Bad Request"

        buf.append(CRLF)
        if msg:
            if isinstance(msg, unicode):
                msg = msg.encode("ISO-8859-1")
            buf.append(msg)

        try:
            self.conn.wfile.sendall("".join(buf))
        except socket.error, x:
            if x.args[0] not in socket_errors_to_ignore:
                raise

    def write(self, chunk):
        """Write unbuffered data to the client."""
        if self.chunked_write and chunk:
            # chunked framing: hex length, CRLF, data, CRLF
            buf = [hex(len(chunk))[2:], CRLF, chunk, CRLF]
            self.conn.wfile.sendall("".join(buf))
        else:
            self.conn.wfile.sendall(chunk)

    def send_headers(self):
        """Assert, process, and send the HTTP response message-headers.

        You must set self.status, and self.outheaders before calling this.
        """
        hkeys = [key.lower() for key, value in self.outheaders]
        status = int(self.status[:3])

        if status == 413:
            # Request Entity Too Large. Close conn to avoid garbage.
            self.close_connection = True
        elif "content-length" not in hkeys:
            # "All 1xx (informational), 204 (no content),
            # and 304 (not modified) responses MUST NOT
            # include a message-body." So no point chunking.
            if status < 200 or status in (204, 205, 304):
                pass
            else:
                if (self.response_protocol == 'HTTP/1.1'
                    and self.method != 'HEAD'):
                    # Use the chunked transfer-coding
                    self.chunked_write = True
                    self.outheaders.append(("Transfer-Encoding", "chunked"))
                else:
                    # Closing the conn is the only way to determine len.
                    self.close_connection = True

        if "connection" not in hkeys:
            if self.response_protocol == 'HTTP/1.1':
                # Both server and client are HTTP/1.1 or better
                if self.close_connection:
                    self.outheaders.append(("Connection", "close"))
            else:
                # Server and/or client are HTTP/1.0
                if not self.close_connection:
                    self.outheaders.append(("Connection", "Keep-Alive"))

        if (not self.close_connection) and (not self.chunked_read):
            # Read any remaining request body data on the socket.
            # "If an origin server receives a request that does not include an
            # Expect request-header field with the "100-continue" expectation,
            # the request includes a request body, and the server responds
            # with a final status code before reading the entire request body
            # from the transport connection, then the server SHOULD NOT close
            # the transport connection until it has read the entire request,
            # or until the client closes the connection. Otherwise, the client
            # might not reliably receive the response message. However, this
            # requirement is not be construed as preventing a server from
            # defending itself against denial-of-service attacks, or from
            # badly broken client implementations."
            remaining = getattr(self.rfile, 'remaining', 0)
            if remaining > 0:
                self.rfile.read(remaining)

        if "date" not in hkeys:
            self.outheaders.append(("Date", rfc822.formatdate()))

        if "server" not in hkeys:
            self.outheaders.append(("Server", self.server.server_name))

        buf = [self.server.protocol + " " + self.status + CRLF]
        for k, v in self.outheaders:
            buf.append(k + ": " + v + CRLF)
        buf.append(CRLF)
        self.conn.wfile.sendall("".join(buf))
+
+
class NoSSLError(Exception):
    """Exception raised when a client speaks HTTP to an HTTPS socket."""
+
+
class FatalSSLAlert(Exception):
    """Exception raised when the SSL implementation signals a fatal alert."""
+
+
class CP_fileobject(socket._fileobject):
    """Faux file object attached to a socket object.

    Extends the stdlib socket._fileobject with byte counters (bytes_read /
    bytes_written) and non-blocking-aware send/recv. Two read/readline
    implementations are provided below, selected by whether this Python
    version's _fileobject keeps its read buffer as a StringIO or a str
    (_fileobject_uses_str_type is defined elsewhere in this module).
    """

    def __init__(self, *args, **kwargs):
        self.bytes_read = 0
        self.bytes_written = 0
        socket._fileobject.__init__(self, *args, **kwargs)

    def sendall(self, data):
        """Sendall for non-blocking sockets."""
        while data:
            try:
                bytes_sent = self.send(data)
                data = data[bytes_sent:]
            except socket.error, e:
                # Retry on would-block errors; re-raise anything else.
                if e.args[0] not in socket_errors_nonblocking:
                    raise

    def send(self, data):
        """Send once on the raw socket; returns bytes sent and tracks it."""
        bytes_sent = self._sock.send(data)
        self.bytes_written += bytes_sent
        return bytes_sent

    def flush(self):
        """Flush any buffered outgoing data via sendall."""
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self.sendall(buffer)

    def recv(self, size):
        """Receive up to size bytes, retrying on EINTR/would-block errors."""
        while True:
            try:
                data = self._sock.recv(size)
                self.bytes_read += len(data)
                return data
            except socket.error, e:
                if (e.args[0] not in socket_errors_nonblocking
                    and e.args[0] not in socket_error_eintr):
                    raise

    if not _fileobject_uses_str_type:
        # Variant for Pythons where _fileobject._rbuf is a StringIO.
        def read(self, size=-1):
            """Read size bytes (or until EOF if size < 0) from the socket."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer?  Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that.  The returned data string is short lived
                    # as we copy it into a StringIO and free it.  This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                    #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            """Read one line (or up to size bytes of it) from the socket."""
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf.  we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut.  Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut.  Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                    #assert buf_len == buf.tell()
                return buf.getvalue()
    else:
        # Variant for Pythons where _fileobject._rbuf is a plain str.
        def read(self, size=-1):
            """Read size bytes (or until EOF if size < 0) from the socket."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            """Read one line (or up to size bytes of it) from the socket."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
+
+
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    remote_addr = None
    remote_port = None
    ssl_env = None
    rbufsize = DEFAULT_BUFFER_SIZE
    wbufsize = DEFAULT_BUFFER_SIZE
    RequestHandlerClass = HTTPRequest

    def __init__(self, server, sock, makefile=CP_fileobject):
        """Wrap ``sock`` with buffered read/write file objects."""
        self.server = server
        self.socket = sock
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", self.wbufsize)
        self.requests_seen = 0

    def communicate(self):
        """Read each request and respond appropriately."""
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if self.server.stats['Enabled']:
                    self.requests_seen += 1
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return
        except socket.error, e:
            errnum = e.args[0]
            # sadly SSL sockets return a different (longer) time out string
            if errnum == 'timed out' or errnum == 'The read operation timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error",
                                            format_exc())
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile
                self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
            self.linger = True
        except Exception:
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error", format_exc())
                except FatalSSLAlert:
                    # Close the connection.
                    return

    # Class attribute (set per-instance in communicate on NoSSLError):
    # when True, close() skips the hard socket close so the client can
    # finish reading our response.
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
+
+
# Sentinel placed on the worker queue to tell a WorkerThread to exit.
_SHUTDOWNREQUEST = None
+
+class WorkerThread(threading.Thread):
+ """Thread which continuously polls a Queue for Connection objects.
+
+ Due to the timing issues of polling a Queue, a WorkerThread does not
+ check its own 'ready' flag after it has started. To stop the thread,
+ it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
+ (one for each running WorkerThread).
+ """
+
+ conn = None
+ """The current connection pulled off the Queue, or None."""
+
+ server = None
+ """The HTTP Server which spawned this thread, and which owns the
+ Queue and is placing active connections into it."""
+
+ ready = False
+ """A simple flag for the calling server to know when this thread
+ has begun polling the Queue."""
+
+
+ def __init__(self, server):
+ self.ready = False
+ self.server = server
+
+ self.requests_seen = 0
+ self.bytes_read = 0
+ self.bytes_written = 0
+ self.start_time = None
+ self.work_time = 0
+ self.stats = {
+ 'Requests': lambda s: self.requests_seen + ((self.start_time is None) and 0 or self.conn.requests_seen),
+ 'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and 0 or self.conn.rfile.bytes_read),
+ 'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and 0 or self.conn.wfile.bytes_written),
+ 'Work Time': lambda s: self.work_time + ((self.start_time is None) and 0 or time.time() - self.start_time),
+ 'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
+ 'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
+ }
+ threading.Thread.__init__(self)
+
+ def run(self):
+ self.server.stats['Worker Threads'][self.getName()] = self.stats
+ try:
+ self.ready = True
+ while True:
+ conn = self.server.requests.get()
+ if conn is _SHUTDOWNREQUEST:
+ return
+
+ self.conn = conn
+ if self.server.stats['Enabled']:
+ self.start_time = time.time()
+ try:
+ conn.communicate()
+ finally:
+ conn.close()
+ if self.server.stats['Enabled']:
+ self.requests_seen += self.conn.requests_seen
+ self.bytes_read += self.conn.rfile.bytes_read
+ self.bytes_written += self.conn.wfile.bytes_written
+ self.work_time += time.time() - self.start_time
+ self.start_time = None
+ self.conn = None
+ except (KeyboardInterrupt, SystemExit), exc:
+ self.server.interrupt = exc
+
+
+class ThreadPool(object):
+ """A Request Queue for the CherryPyWSGIServer which pools threads.
+
+ ThreadPool objects must provide min, get(), put(obj), start()
+ and stop(timeout) attributes.
+ """
+
+ def __init__(self, server, min=10, max=-1):
+ self.server = server
+ self.min = min
+ self.max = max
+ self._threads = []
+ self._queue = Queue.Queue()
+ self.get = self._queue.get
+
+ def start(self):
+ """Start the pool of threads."""
+ for i in range(self.min):
+ self._threads.append(WorkerThread(self.server))
+ for worker in self._threads:
+ worker.setName("CP Server " + worker.getName())
+ worker.start()
+ for worker in self._threads:
+ while not worker.ready:
+ time.sleep(.1)
+
+ def _get_idle(self):
+ """Number of worker threads which are idle. Read-only."""
+ return len([t for t in self._threads if t.conn is None])
+ idle = property(_get_idle, doc=_get_idle.__doc__)
+
+ def put(self, obj):
+ self._queue.put(obj)
+ if obj is _SHUTDOWNREQUEST:
+ return
+
+ def grow(self, amount):
+ """Spawn new worker threads (not above self.max)."""
+ for i in range(amount):
+ if self.max > 0 and len(self._threads) >= self.max:
+ break
+ worker = WorkerThread(self.server)
+ worker.setName("CP Server " + worker.getName())
+ self._threads.append(worker)
+ worker.start()
+
+ def shrink(self, amount):
+ """Kill off worker threads (not below self.min)."""
+ # Grow/shrink the pool if necessary.
+ # Remove any dead threads from our list
+ for t in self._threads:
+ if not t.isAlive():
+ self._threads.remove(t)
+ amount -= 1
+
+ if amount > 0:
+ for i in range(min(amount, len(self._threads) - self.min)):
+ # Put a number of shutdown requests on the queue equal
+ # to 'amount'. Once each of those is processed by a worker,
+ # that worker will terminate and be culled from our list
+ # in self.put.
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ def stop(self, timeout=5):
+ # Must shut down threads here so the code that calls
+ # this method can know when all threads are stopped.
+ for worker in self._threads:
+ self._queue.put(_SHUTDOWNREQUEST)
+
+ # Don't join currentThread (when stop is called inside a request).
+ current = threading.currentThread()
+ if timeout and timeout >= 0:
+ endtime = time.time() + timeout
+ while self._threads:
+ worker = self._threads.pop()
+ if worker is not current and worker.isAlive():
+ try:
+ if timeout is None or timeout < 0:
+ worker.join()
+ else:
+ remaining_time = endtime - time.time()
+ if remaining_time > 0:
+ worker.join(remaining_time)
+ if worker.isAlive():
+ # We exhausted the timeout.
+ # Forcibly shut down the socket.
+ c = worker.conn
+ if c and not c.rfile.closed:
+ try:
+ c.socket.shutdown(socket.SHUT_RD)
+ except TypeError:
+ # pyOpenSSL sockets don't take an arg
+ c.socket.shutdown()
+ worker.join()
+ except (AssertionError,
+ # Ignore repeated Ctrl-C.
+ # See http://www.cherrypy.org/ticket/691.
+ KeyboardInterrupt), exc1:
+ pass
+
+ def _get_qsize(self):
+ return self._queue.qsize()
+ qsize = property(_get_qsize)
+
+
+
# Pick a platform-appropriate implementation of prevent_socket_inheritance:
# POSIX (fcntl/FD_CLOEXEC), Windows (ctypes/SetHandleInformation), or a no-op
# when neither facility is available.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            # HANDLE_FLAG_INHERIT (1) cleared to 0.
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
+
+
+class SSLAdapter(object):
+ """Base class for SSL driver library adapters.
+
+ Required methods:
+
+ * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
+ * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
+ """
+
+ def __init__(self, certificate, private_key, certificate_chain=None):
+ self.certificate = certificate
+ self.private_key = private_key
+ self.certificate_chain = certificate_chain
+
+ def wrap(self, sock):
+ raise NotImplemented
+
+ def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
+ raise NotImplemented
+
+
+class HTTPServer(object):
+    """An HTTP server."""
+
+    _bind_addr = "127.0.0.1"
+    _interrupt = None
+
+    gateway = None
+    """A Gateway instance."""
+
+    minthreads = None
+    """The minimum number of worker threads to create (default 10)."""
+
+    maxthreads = None
+    """The maximum number of worker threads to create (default -1 = no limit)."""
+
+    server_name = None
+    """The name of the server; defaults to socket.gethostname()."""
+
+    protocol = "HTTP/1.1"
+    """The version string to write in the Status-Line of all HTTP responses.
+
+    For example, "HTTP/1.1" is the default. This also limits the supported
+    features used in the response."""
+
+    request_queue_size = 5
+    """The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
+
+    shutdown_timeout = 5
+    """The total time, in seconds, to wait for worker threads to cleanly exit."""
+
+    timeout = 10
+    """The timeout in seconds for accepted connections (default 10)."""
+
+    version = "CherryPy/3.2.0"
+    """A version string for the HTTPServer."""
+
+    software = None
+    """The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
+
+    If None, this defaults to ``'%s Server' % self.version``."""
+
+    ready = False
+    """An internal flag which marks whether the socket is accepting connections."""
+
+    max_request_header_size = 0
+    """The maximum size, in bytes, for request headers, or 0 for no limit."""
+
+    max_request_body_size = 0
+    """The maximum size, in bytes, for request bodies, or 0 for no limit."""
+
+    nodelay = True
+    """If True (the default since 3.1), sets the TCP_NODELAY socket option."""
+
+    ConnectionClass = HTTPConnection
+    """The class to use for handling HTTP connections."""
+
+    ssl_adapter = None
+    """An instance of SSLAdapter (or a subclass).
+
+    You must have the corresponding SSL driver library installed."""
+
+    def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
+                 server_name=None):
+        self.bind_addr = bind_addr
+        self.gateway = gateway
+
+        self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)  # worker pool
+
+        if not server_name:
+            server_name = socket.gethostname()
+        self.server_name = server_name
+        self.clear_stats()
+
+    def clear_stats(self):
+        self._start_time = None
+        self._run_time = 0
+        self.stats = {
+            'Enabled': False,
+            'Bind Address': lambda s: repr(self.bind_addr),
+            'Run time': lambda s: (not s['Enabled']) and 0 or self.runtime(),
+            'Accepts': 0,
+            'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
+            'Queue': lambda s: getattr(self.requests, "qsize", None),
+            'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
+            'Threads Idle': lambda s: getattr(self.requests, "idle", None),
+            'Socket Errors': 0,
+            'Requests': lambda s: (not s['Enabled']) and 0 or sum([w['Requests'](w) for w
+                                   in s['Worker Threads'].values()], 0),
+            'Bytes Read': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Read'](w) for w
+                                     in s['Worker Threads'].values()], 0),
+            'Bytes Written': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Written'](w) for w
+                                        in s['Worker Threads'].values()], 0),
+            'Work Time': lambda s: (not s['Enabled']) and 0 or sum([w['Work Time'](w) for w
+                                    in s['Worker Threads'].values()], 0),
+            'Read Throughput': lambda s: (not s['Enabled']) and 0 or sum(
+                [w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
+                 for w in s['Worker Threads'].values()], 0),
+            'Write Throughput': lambda s: (not s['Enabled']) and 0 or sum(
+                [w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
+                 for w in s['Worker Threads'].values()], 0),
+            'Worker Threads': {},
+            }
+        logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
+
+    def runtime(self):
+        if self._start_time is None:
+            return self._run_time
+        else:
+            return self._run_time + (time.time() - self._start_time)
+
+    def __str__(self):
+        return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
+                              self.bind_addr)
+
+    def _get_bind_addr(self):
+        return self._bind_addr
+    def _set_bind_addr(self, value):
+        if isinstance(value, tuple) and value[0] in ('', None):
+            # Despite the socket module docs, using '' does not
+            # allow AI_PASSIVE to work. Passing None instead
+            # returns '0.0.0.0' like we want. In other words:
+            #     host    AI_PASSIVE     result
+            #      ''         Y         192.168.x.y
+            #      ''         N         192.168.x.y
+            #     None        Y         0.0.0.0
+            #     None        N         127.0.0.1
+            # But since you can get the same effect with an explicit
+            # '0.0.0.0', we deny both the empty string and None as values.
+            raise ValueError("Host values of '' or None are not allowed. "
+                             "Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
+                             "to listen on all active interfaces.")
+        self._bind_addr = value
+    bind_addr = property(_get_bind_addr, _set_bind_addr,
+        doc="""The interface on which to listen for connections.
+
+        For TCP sockets, a (host, port) tuple. Host values may be any IPv4
+        or IPv6 address, or any valid hostname. The string 'localhost' is a
+        synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
+        The string '0.0.0.0' is a special IPv4 entry meaning "any active
+        interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
+        IPv6. The empty string or None are not allowed.
+
+        For UNIX sockets, supply the filename as a string.""")
+
+    def start(self):
+        """Run the server forever."""
+        # We don't have to trap KeyboardInterrupt or SystemExit here,
+        # because cherrypy.server already does so, calling self.stop() for us.
+        # If you're using this server with another framework, you should
+        # trap those exceptions in whatever code block calls start().
+        self._interrupt = None  # clear any interrupt left over from a previous run
+
+        if self.software is None:
+            self.software = "%s Server" % self.version
+
+        # SSL backward compatibility
+        if (self.ssl_adapter is None and
+            getattr(self, 'ssl_certificate', None) and
+            getattr(self, 'ssl_private_key', None)):
+            warnings.warn(
+                    "SSL attributes are deprecated in CherryPy 3.2, and will "
+                    "be removed in CherryPy 3.3. Use an ssl_adapter attribute "
+                    "instead.",
+                    DeprecationWarning
+                )
+            try:
+                from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
+            except ImportError:
+                pass
+            else:
+                self.ssl_adapter = pyOpenSSLAdapter(
+                    self.ssl_certificate, self.ssl_private_key,
+                    getattr(self, 'ssl_certificate_chain', None))
+
+        # Select the appropriate socket
+        if isinstance(self.bind_addr, basestring):
+            # AF_UNIX socket
+
+            # So we can reuse the socket...
+            try: os.unlink(self.bind_addr)
+            except: pass
+
+            # So everyone can access the socket...
+            try: os.chmod(self.bind_addr, 0777)
+            except: pass
+
+            info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
+        else:
+            # AF_INET or AF_INET6 socket
+            # Get the correct address family for our host (allows IPv6 addresses)
+            host, port = self.bind_addr
+            try:
+                info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                                          socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
+            except socket.gaierror:
+                if ':' in self.bind_addr[0]:
+                    info = [(socket.AF_INET6, socket.SOCK_STREAM,
+                             0, "", self.bind_addr + (0, 0))]
+                else:
+                    info = [(socket.AF_INET, socket.SOCK_STREAM,
+                             0, "", self.bind_addr)]
+
+        self.socket = None
+        msg = "No socket could be created"
+        for res in info:
+            af, socktype, proto, canonname, sa = res
+            try:
+                self.bind(af, socktype, proto)
+            except socket.error:
+                if self.socket:
+                    self.socket.close()
+                self.socket = None
+                continue
+            break
+        if not self.socket:
+            raise socket.error(msg)
+
+        # Timeout so KeyboardInterrupt can be caught on Win32
+        self.socket.settimeout(1)
+        self.socket.listen(self.request_queue_size)
+
+        # Create worker threads
+        self.requests.start()
+
+        self.ready = True  # accept loop below runs until stop() clears this flag
+        self._start_time = time.time()
+        while self.ready:
+            self.tick()
+            if self.interrupt:
+                while self.interrupt is True:
+                    # Wait for self.stop() to complete. See _set_interrupt.
+                    time.sleep(0.1)
+                if self.interrupt:
+                    raise self.interrupt
+
+    def bind(self, family, type, proto=0):
+        """Create (or recreate) the actual socket object."""
+        self.socket = socket.socket(family, type, proto)
+        prevent_socket_inheritance(self.socket)
+        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+        if self.nodelay and not isinstance(self.bind_addr, str):
+            self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+
+        if self.ssl_adapter is not None:
+            self.socket = self.ssl_adapter.bind(self.socket)
+
+        # If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
+        # activate dual-stack. See http://www.cherrypy.org/ticket/871.
+        if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
+            and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
+            try:
+                self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
+            except (AttributeError, socket.error):
+                # Apparently, the socket option is not available in
+                # this machine's TCP stack
+                pass
+
+        self.socket.bind(self.bind_addr)
+
+    def tick(self):
+        """Accept a new connection and put it on the Queue."""
+        try:
+            s, addr = self.socket.accept()
+            if self.stats['Enabled']:
+                self.stats['Accepts'] += 1
+            if not self.ready:
+                return
+
+            prevent_socket_inheritance(s)
+            if hasattr(s, 'settimeout'):
+                s.settimeout(self.timeout)
+
+            makefile = CP_fileobject
+            ssl_env = {}
+            # if ssl cert and key are set, we try to be a secure HTTP server
+            if self.ssl_adapter is not None:
+                try:
+                    s, ssl_env = self.ssl_adapter.wrap(s)
+                except NoSSLError:
+                    msg = ("The client sent a plain HTTP request, but "
+                           "this server only speaks HTTPS on this port.")
+                    buf = ["%s 400 Bad Request\r\n" % self.protocol,
+                           "Content-Length: %s\r\n" % len(msg),
+                           "Content-Type: text/plain\r\n\r\n",
+                           msg]
+
+                    wfile = CP_fileobject(s, "wb", DEFAULT_BUFFER_SIZE)
+                    try:
+                        wfile.sendall("".join(buf))
+                    except socket.error, x:
+                        if x.args[0] not in socket_errors_to_ignore:
+                            raise
+                    return
+                if not s:
+                    return
+                makefile = self.ssl_adapter.makefile
+                # Re-apply our timeout since we may have a new socket object
+                if hasattr(s, 'settimeout'):
+                    s.settimeout(self.timeout)
+
+            conn = self.ConnectionClass(self, s, makefile)
+
+            if not isinstance(self.bind_addr, basestring):
+                # optional values
+                # Until we do DNS lookups, omit REMOTE_HOST
+                if addr is None: # sometimes this can happen
+                    # figure out if AF_INET or AF_INET6.
+                    if len(s.getsockname()) == 2:
+                        # AF_INET
+                        addr = ('0.0.0.0', 0)
+                    else:
+                        # AF_INET6
+                        addr = ('::', 0)
+                conn.remote_addr = addr[0]
+                conn.remote_port = addr[1]
+
+            conn.ssl_env = ssl_env
+
+            self.requests.put(conn)
+        except socket.timeout:
+            # The only reason for the timeout in start() is so we can
+            # notice keyboard interrupts on Win32, which don't interrupt
+            # accept() by default
+            return
+        except socket.error, x:
+            if self.stats['Enabled']:
+                self.stats['Socket Errors'] += 1
+            if x.args[0] in socket_error_eintr:
+                # I *think* this is right. EINTR should occur when a signal
+                # is received during the accept() call; all docs say retry
+                # the call, and I *think* I'm reading it right that Python
+                # will then go ahead and poll for and handle the signal
+                # elsewhere. See http://www.cherrypy.org/ticket/707.
+                return
+            if x.args[0] in socket_errors_nonblocking:
+                # Just try again. See http://www.cherrypy.org/ticket/479.
+                return
+            if x.args[0] in socket_errors_to_ignore:
+                # Our socket was closed.
+                # See http://www.cherrypy.org/ticket/686.
+                return
+            raise
+
+    def _get_interrupt(self):
+        return self._interrupt
+    def _set_interrupt(self, interrupt):
+        self._interrupt = True  # temporary marker; start() spins while this is True
+        self.stop()
+        self._interrupt = interrupt
+    interrupt = property(_get_interrupt, _set_interrupt,
+                         doc="Set this to an Exception instance to "
+                             "interrupt the server.")
+
+    def stop(self):
+        """Gracefully shutdown a server that is serving forever."""
+        self.ready = False
+        if self._start_time is not None:
+            self._run_time += (time.time() - self._start_time)
+        self._start_time = None
+
+        sock = getattr(self, "socket", None)
+        if sock:
+            if not isinstance(self.bind_addr, basestring):
+                # Touch our own socket to make accept() return immediately.
+                try:
+                    host, port = sock.getsockname()[:2]
+                except socket.error, x:
+                    if x.args[0] not in socket_errors_to_ignore:
+                        # Changed to use error code and not message
+                        # See http://www.cherrypy.org/ticket/860.
+                        raise
+                else:
+                    # Note that we're explicitly NOT using AI_PASSIVE,
+                    # here, because we want an actual IP to touch.
+                    # localhost won't work if we've bound to a public IP,
+                    # but it will if we bound to '0.0.0.0' (INADDR_ANY).
+                    for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
+                                                  socket.SOCK_STREAM):
+                        af, socktype, proto, canonname, sa = res
+                        s = None
+                        try:
+                            s = socket.socket(af, socktype, proto)
+                            # See http://groups.google.com/group/cherrypy-users/
+                            #     browse_frm/thread/bbfe5eb39c904fe0
+                            s.settimeout(1.0)
+                            s.connect((host, port))
+                            s.close()
+                        except socket.error:
+                            if s:
+                                s.close()
+            if hasattr(sock, "close"):
+                sock.close()
+            self.socket = None
+
+        self.requests.stop(self.shutdown_timeout)
+
+
+class Gateway(object):
+
+ def __init__(self, req):
+ self.req = req
+
+ def respond(self):
+ raise NotImplemented
+
+
+# These may either be wsgiserver.SSLAdapter subclasses or the string names
+# of such classes (in which case they will be lazily loaded).
+ssl_adapters = {
+    'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',  # stdlib ssl
+    'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',  # pyOpenSSL package
+    }
+
+def get_ssl_adapter_class(name='pyopenssl'):
+    adapter = ssl_adapters[name.lower()]  # either a class or a dotted-path string
+    if isinstance(adapter, basestring):
+        last_dot = adapter.rfind(".")
+        attr_name = adapter[last_dot + 1:]
+        mod_path = adapter[:last_dot]
+
+        try:
+            mod = sys.modules[mod_path]  # reuse an already-imported module
+            if mod is None:
+                raise KeyError()
+        except KeyError:
+            # The last [''] is important.
+            mod = __import__(mod_path, globals(), locals(), [''])
+
+        # Let an AttributeError propagate outward.
+        try:
+            adapter = getattr(mod, attr_name)
+        except AttributeError:
+            raise AttributeError("'%s' object has no attribute '%s'"
+                                 % (mod_path, attr_name))
+
+    return adapter
+
+# -------------------------------- WSGI Stuff -------------------------------- #
+
+
+class CherryPyWSGIServer(HTTPServer):
+
+    wsgi_version = (1, 0)  # selects the gateway class from wsgi_gateways
+
+    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
+                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):  # NB: does not call HTTPServer.__init__
+        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
+        self.wsgi_app = wsgi_app
+        self.gateway = wsgi_gateways[self.wsgi_version]
+
+        self.bind_addr = bind_addr
+        if not server_name:
+            server_name = socket.gethostname()
+        self.server_name = server_name
+        self.request_queue_size = request_queue_size
+
+        self.timeout = timeout
+        self.shutdown_timeout = shutdown_timeout
+        self.clear_stats()
+
+    def _get_numthreads(self):
+        return self.requests.min
+    def _set_numthreads(self, value):
+        self.requests.min = value
+    numthreads = property(_get_numthreads, _set_numthreads)  # thread-pool minimum size
+
+
+class WSGIGateway(Gateway):
+
+ def __init__(self, req):
+ self.req = req
+ self.started_response = False
+ self.env = self.get_environ()
+ self.remaining_bytes_out = None
+
+ def get_environ(self):
+ """Return a new environ dict targeting the given wsgi.version"""
+ raise NotImplemented
+
+ def respond(self):
+ response = self.req.server.wsgi_app(self.env, self.start_response)
+ try:
+ for chunk in response:
+ # "The start_response callable must not actually transmit
+ # the response headers. Instead, it must store them for the
+ # server or gateway to transmit only after the first
+ # iteration of the application return value that yields
+ # a NON-EMPTY string, or upon the application's first
+ # invocation of the write() callable." (PEP 333)
+ if chunk:
+ if isinstance(chunk, unicode):
+ chunk = chunk.encode('ISO-8859-1')
+ self.write(chunk)
+ finally:
+ if hasattr(response, "close"):
+ response.close()
+
+ def start_response(self, status, headers, exc_info = None):
+ """WSGI callable to begin the HTTP response."""
+ # "The application may call start_response more than once,
+ # if and only if the exc_info argument is provided."
+ if self.started_response and not exc_info:
+ raise AssertionError("WSGI start_response called a second "
+ "time with no exc_info.")
+ self.started_response = True
+
+ # "if exc_info is provided, and the HTTP headers have already been
+ # sent, start_response must raise an error, and should raise the
+ # exc_info tuple."
+ if self.req.sent_headers:
+ try:
+ raise exc_info[0], exc_info[1], exc_info[2]
+ finally:
+ exc_info = None
+
+ self.req.status = status
+ for k, v in headers:
+ if not isinstance(k, str):
+ raise TypeError("WSGI response header key %r is not a byte string." % k)
+ if not isinstance(v, str):
+ raise TypeError("WSGI response header value %r is not a byte string." % v)
+ if k.lower() == 'content-length':
+ self.remaining_bytes_out = int(v)
+ self.req.outheaders.extend(headers)
+
+ return self.write
+
+ def write(self, chunk):
+ """WSGI callable to write unbuffered data to the client.
+
+ This method is also used internally by start_response (to write
+ data from the iterable returned by the WSGI application).
+ """
+ if not self.started_response:
+ raise AssertionError("WSGI write called before start_response.")
+
+ chunklen = len(chunk)
+ rbo = self.remaining_bytes_out
+ if rbo is not None and chunklen > rbo:
+ if not self.req.sent_headers:
+ # Whew. We can send a 500 to the client.
+ self.req.simple_response("500 Internal Server Error",
+ "The requested resource returned more bytes than the "
+ "declared Content-Length.")
+ else:
+ # Dang. We have probably already sent data. Truncate the chunk
+ # to fit (so the client doesn't hang) and raise an error later.
+ chunk = chunk[:rbo]
+
+ if not self.req.sent_headers:
+ self.req.sent_headers = True
+ self.req.send_headers()
+
+ self.req.write(chunk)
+
+ if rbo is not None:
+ rbo -= chunklen
+ if rbo < 0:
+ raise ValueError(
+ "Response body exceeds the declared Content-Length.")
+
+
+class WSGIGateway_10(WSGIGateway):
+
+    def get_environ(self):
+        """Return a new environ dict targeting the given wsgi.version"""
+        req = self.req  # build a WSGI 1.0 environ from the parsed request
+        env = {
+            # set a non-standard environ entry so the WSGI app can know what
+            # the *real* server protocol is (and what features to support).
+            # See http://www.faqs.org/rfcs/rfc2145.html.
+            'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
+            'PATH_INFO': req.path,
+            'QUERY_STRING': req.qs,
+            'REMOTE_ADDR': req.conn.remote_addr or '',
+            'REMOTE_PORT': str(req.conn.remote_port or ''),
+            'REQUEST_METHOD': req.method,
+            'REQUEST_URI': req.uri,
+            'SCRIPT_NAME': '',
+            'SERVER_NAME': req.server.server_name,
+            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
+            'SERVER_PROTOCOL': req.request_protocol,
+            'SERVER_SOFTWARE': req.server.software,
+            'wsgi.errors': sys.stderr,
+            'wsgi.input': req.rfile,
+            'wsgi.multiprocess': False,
+            'wsgi.multithread': True,
+            'wsgi.run_once': False,
+            'wsgi.url_scheme': req.scheme,
+            'wsgi.version': (1, 0),
+            }
+
+        if isinstance(req.server.bind_addr, basestring):
+            # AF_UNIX. This isn't really allowed by WSGI, which doesn't
+            # address unix domain sockets. But it's better than nothing.
+            env["SERVER_PORT"] = ""
+        else:
+            env["SERVER_PORT"] = str(req.server.bind_addr[1])
+
+        # Request headers
+        for k, v in req.inheaders.iteritems():
+            env["HTTP_" + k.upper().replace("-", "_")] = v
+
+        # CONTENT_TYPE/CONTENT_LENGTH
+        ct = env.pop("HTTP_CONTENT_TYPE", None)
+        if ct is not None:
+            env["CONTENT_TYPE"] = ct
+        cl = env.pop("HTTP_CONTENT_LENGTH", None)
+        if cl is not None:
+            env["CONTENT_LENGTH"] = cl
+
+        if req.conn.ssl_env:
+            env.update(req.conn.ssl_env)
+
+        return env
+
+
+class WSGIGateway_u0(WSGIGateway_10):
+
+    def get_environ(self):
+        """Return a new environ dict targeting the given wsgi.version"""
+        req = self.req
+        env_10 = WSGIGateway_10.get_environ(self)
+        env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])  # unicode keys
+        env[u'wsgi.version'] = ('u', 0)
+
+        # Request-URI
+        env.setdefault(u'wsgi.url_encoding', u'utf-8')
+        try:
+            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
+                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
+        except UnicodeDecodeError:
+            # Fall back to latin 1 so apps can transcode if needed.
+            env[u'wsgi.url_encoding'] = u'ISO-8859-1'
+            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
+                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
+
+        for k, v in sorted(env.items()):
+            if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
+                env[k] = v.decode('ISO-8859-1')
+
+        return env
+
+wsgi_gateways = {  # maps a wsgi.version tuple to its gateway class
+    (1, 0): WSGIGateway_10,
+    ('u', 0): WSGIGateway_u0,
+}
+
+class WSGIPathInfoDispatcher(object):
+    """A WSGI dispatcher for dispatch based on the PATH_INFO.
+
+    apps: a dict or list of (path_prefix, app) pairs.
+    """
+
+    def __init__(self, apps):
+        try:
+            apps = apps.items()  # accept a dict as well as a pair list
+        except AttributeError:
+            pass
+
+        # Sort the apps by len(path), descending
+        apps.sort(cmp=lambda x,y: cmp(len(x[0]), len(y[0])))
+        apps.reverse()
+
+        # The path_prefix strings must start, but not end, with a slash.
+        # Use "" instead of "/".
+        self.apps = [(p.rstrip("/"), a) for p, a in apps]
+
+    def __call__(self, environ, start_response):
+        path = environ["PATH_INFO"] or "/"
+        for p, app in self.apps:
+            # The apps list should be sorted by length, descending.
+            if path.startswith(p + "/") or path == p:
+                environ = environ.copy()
+                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
+                environ["PATH_INFO"] = path[len(p):]  # remainder after the matched prefix
+                return app(environ, start_response)
+
+        start_response('404 Not Found', [('Content-Type', 'text/plain'),
+                                         ('Content-Length', '0')])
+        return ['']
+
diff --git a/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_builtin.py b/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_builtin.py
new file mode 100644
index 0000000..64c0eeb
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_builtin.py
@@ -0,0 +1,72 @@
+"""A library for integrating Python's builtin ``ssl`` library with CherryPy.
+
+The ssl module must be importable for SSL functionality.
+
+To use this module, set ``CherryPyWSGIServer.ssl_adapter`` to an instance of
+``BuiltinSSLAdapter``.
+"""
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+from cherrypy import wsgiserver
+
+
+class BuiltinSSLAdapter(wsgiserver.SSLAdapter):
+    """A wrapper for integrating Python's builtin ssl module with CherryPy."""
+
+    certificate = None
+    """The filename of the server SSL certificate."""
+
+    private_key = None
+    """The filename of the server's private key file."""
+
+    def __init__(self, certificate, private_key, certificate_chain=None):
+        if ssl is None:
+            raise ImportError("You must install the ssl module to use HTTPS.")
+        self.certificate = certificate
+        self.private_key = private_key
+        self.certificate_chain = certificate_chain
+
+    def bind(self, sock):
+        """Wrap and return the given socket."""
+        return sock
+
+    def wrap(self, sock):
+        """Wrap and return the given socket, plus WSGI environ entries."""
+        try:
+            s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
+                    server_side=True, certfile=self.certificate,
+                    keyfile=self.private_key, ssl_version=ssl.PROTOCOL_SSLv23)  # NOTE(review): SSLv23 permits legacy protocols — consider restricting
+        except ssl.SSLError, e:
+            if e.errno == ssl.SSL_ERROR_EOF:
+                # This is almost certainly due to the cherrypy engine
+                # 'pinging' the socket to assert it's connectable;
+                # the 'ping' isn't SSL.
+                return None, {}
+            elif e.errno == ssl.SSL_ERROR_SSL:
+                if e.args[1].endswith('http request'):
+                    # The client is speaking HTTP to an HTTPS server.
+                    raise wsgiserver.NoSSLError
+            raise
+        return s, self.get_environ(s)
+
+    # TODO: fill this out more with mod ssl env
+    def get_environ(self, sock):
+        """Create WSGI environ entries to be merged into each request."""
+        cipher = sock.cipher()  # (name, protocol, secret_bits)
+        ssl_environ = {
+            "wsgi.url_scheme": "https",
+            "HTTPS": "on",
+            'SSL_PROTOCOL': cipher[1],
+            'SSL_CIPHER': cipher[0]
+##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
+##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
+            }
+        return ssl_environ
+
+    def makefile(self, sock, mode='r', bufsize=-1):
+        return wsgiserver.CP_fileobject(sock, mode, bufsize)
+
diff --git a/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_pyopenssl.py b/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_pyopenssl.py
new file mode 100644
index 0000000..f3d9bf5
--- /dev/null
+++ b/lib/nulib/python/nulib/ext/web/wsgiserver/ssl_pyopenssl.py
@@ -0,0 +1,256 @@
+"""A library for integrating pyOpenSSL with CherryPy.
+
+The OpenSSL module must be importable for SSL functionality.
+You can obtain it from http://pyopenssl.sourceforge.net/
+
+To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
+SSLAdapter. There are two ways to use SSL:
+
+Method One
+----------
+
+ * ``ssl_adapter.context``: an instance of SSL.Context.
+
+If this is not None, it is assumed to be an SSL.Context instance,
+and will be passed to SSL.Connection on bind(). The developer is
+responsible for forming a valid Context object. This approach is
+to be preferred for more flexibility, e.g. if the cert and key are
+streams instead of files, or need decryption, or SSL.SSLv3_METHOD
+is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
+the pyOpenSSL documentation for complete options.
+
+Method Two (shortcut)
+---------------------
+
+ * ``ssl_adapter.certificate``: the filename of the server SSL certificate.
+ * ``ssl_adapter.private_key``: the filename of the server's private key file.
+
+Both are None by default. If ssl_adapter.context is None, but .private_key
+and .certificate are both given and valid, they will be read, and the
+context will be automatically created from them.
+"""
+
+import socket
+import threading
+import time
+
+from cherrypy import wsgiserver
+
+try:
+ from OpenSSL import SSL
+ from OpenSSL import crypto
+except ImportError:
+ SSL = None
+
+
+class SSL_fileobject(wsgiserver.CP_fileobject):
+    """SSL file object attached to a socket object."""
+
+    ssl_timeout = 3  # seconds before a retried SSL call gives up
+    ssl_retry = .01  # seconds to sleep between WantRead/WantWrite retries
+
+    def _safe_call(self, is_reader, call, *args, **kwargs):
+        """Wrap the given call with SSL error-trapping.
+
+        is_reader: if False EOF errors will be raised. If True, EOF errors
+        will return "" (to emulate normal sockets).
+        """
+        start = time.time()
+        while True:
+            try:
+                return call(*args, **kwargs)
+            except SSL.WantReadError:
+                # Sleep and try again. This is dangerous, because it means
+                # the rest of the stack has no way of differentiating
+                # between a "new handshake" error and "client dropped".
+                # Note this isn't an endless loop: there's a timeout below.
+                time.sleep(self.ssl_retry)
+            except SSL.WantWriteError:
+                time.sleep(self.ssl_retry)
+            except SSL.SysCallError, e:
+                if is_reader and e.args == (-1, 'Unexpected EOF'):
+                    return ""
+
+                errnum = e.args[0]
+                if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
+                    return ""
+                raise socket.error(errnum)
+            except SSL.Error, e:
+                if is_reader and e.args == (-1, 'Unexpected EOF'):
+                    return ""
+
+                thirdarg = None
+                try:
+                    thirdarg = e.args[0][0][2]
+                except IndexError:
+                    pass
+
+                if thirdarg == 'http request':
+                    # The client is talking HTTP to an HTTPS server.
+                    raise wsgiserver.NoSSLError()
+
+                raise wsgiserver.FatalSSLAlert(*e.args)
+            except:
+                raise  # anything else propagates unchanged
+
+            if time.time() - start > self.ssl_timeout:
+                raise socket.timeout("timed out")
+
+    def recv(self, *args, **kwargs):
+        buf = []
+        r = super(SSL_fileobject, self).recv
+        while True:
+            data = self._safe_call(True, r, *args, **kwargs)
+            buf.append(data)
+            p = self._sock.pending()  # drain bytes already buffered by the SSL layer
+            if not p:
+                return "".join(buf)
+
+    def sendall(self, *args, **kwargs):
+        return self._safe_call(False, super(SSL_fileobject, self).sendall,
+                               *args, **kwargs)
+
+    def send(self, *args, **kwargs):
+        return self._safe_call(False, super(SSL_fileobject, self).send,
+                               *args, **kwargs)
+
+
+class SSLConnection:
+    """A thread-safe wrapper for an SSL.Connection.
+
+    ``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
+    """
+
+    def __init__(self, *args):
+        self._ssl_conn = SSL.Connection(*args)
+        self._lock = threading.RLock()
+
+        # Generate a lock-guarded delegating wrapper for each proxied method.
+        for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
+                  'renegotiate', 'bind', 'listen', 'connect', 'accept',
+                  'setblocking', 'fileno', 'close', 'get_cipher_list',
+                  'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
+                  'makefile', 'get_app_data', 'set_app_data', 'state_string',
+                  'sock_shutdown', 'get_peer_certificate', 'want_read',
+                  'want_write', 'set_connect_state', 'set_accept_state',
+                  'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
+            exec("""def %s(self, *args):
+    self._lock.acquire()
+    try:
+        return self._ssl_conn.%s(*args)
+    finally:
+        self._lock.release()
+""" % (f, f))
+
+    def shutdown(self, *args):
+        self._lock.acquire()
+        try:
+            # pyOpenSSL.socket.shutdown takes no args
+            return self._ssl_conn.shutdown()
+        finally:
+            self._lock.release()
+
+
+class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
+    """A wrapper for integrating pyOpenSSL with CherryPy."""
+
+    context = None
+    """An instance of SSL.Context."""
+
+    certificate = None
+    """The filename of the server SSL certificate."""
+
+    private_key = None
+    """The filename of the server's private key file."""
+
+    certificate_chain = None
+    """Optional. The filename of CA's intermediate certificate bundle.
+
+    This is needed for cheaper "chained root" SSL certificates, and should be
+    left as None if not required."""
+
+    def __init__(self, certificate, private_key, certificate_chain=None):
+        if SSL is None:
+            raise ImportError("You must install pyOpenSSL to use HTTPS.")
+
+        self.context = None
+        self.certificate = certificate
+        self.private_key = private_key
+        self.certificate_chain = certificate_chain
+        self._environ = None  # SSL environ dict, computed once in bind()
+
+    def bind(self, sock):
+        """Wrap and return the given socket."""
+        if self.context is None:
+            self.context = self.get_context()
+        conn = SSLConnection(self.context, sock)
+        self._environ = self.get_environ()
+        return conn
+
+    def wrap(self, sock):
+        """Wrap and return the given socket, plus WSGI environ entries."""
+        return sock, self._environ.copy()
+
+    def get_context(self):
+        """Return an SSL.Context from self attributes."""
+        # See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
+        c = SSL.Context(SSL.SSLv23_METHOD)
+        c.use_privatekey_file(self.private_key)
+        if self.certificate_chain:
+            c.load_verify_locations(self.certificate_chain)  # NOTE(review): loads chain as CA verify locations, not use_certificate_chain_file — confirm intent
+        c.use_certificate_file(self.certificate)
+        return c
+
+    def get_environ(self):
+        """Return WSGI environ entries to be merged into each request."""
+        ssl_environ = {
+            "HTTPS": "on",
+            # pyOpenSSL doesn't provide access to any of these AFAICT
+##            'SSL_PROTOCOL': 'SSLv2',
+##            SSL_CIPHER 	string 	The cipher specification name
+##            SSL_VERSION_INTERFACE 	string 	The mod_ssl program version
+##            SSL_VERSION_LIBRARY 	string 	The OpenSSL program version
+            }
+
+        if self.certificate:
+            # Server certificate attributes
+            cert = open(self.certificate, 'rb').read()
+            cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
+            ssl_environ.update({
+                'SSL_SERVER_M_VERSION': cert.get_version(),
+                'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
+##                'SSL_SERVER_V_START': Validity of server's certificate (start time),
+##                'SSL_SERVER_V_END': Validity of server's certificate (end time),
+                })
+
+            for prefix, dn in [("I", cert.get_issuer()),
+                               ("S", cert.get_subject())]:
+                # X509Name objects don't seem to have a way to get the
+                # complete DN string. Use str() and slice it instead,
+                # because str(dn) == ""
+                dnstr = str(dn)[18:-2]
+
+                wsgikey = 'SSL_SERVER_%s_DN' % prefix
+                ssl_environ[wsgikey] = dnstr
+
+                # The DN should be of the form: /k1=v1/k2=v2, but we must allow
+                # for any value to contain slashes itself (in a URL).
+                while dnstr:
+                    pos = dnstr.rfind("=")
+                    dnstr, value = dnstr[:pos], dnstr[pos + 1:]
+                    pos = dnstr.rfind("/")
+                    dnstr, key = dnstr[:pos], dnstr[pos + 1:]
+                    if key and value:
+                        wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
+                        ssl_environ[wsgikey] = value
+
+        return ssl_environ
+
+    def makefile(self, sock, mode='r', bufsize=-1):
+        if SSL and isinstance(sock, SSL.ConnectionType):
+            timeout = sock.gettimeout()
+            f = SSL_fileobject(sock, mode, bufsize)
+            f.ssl_timeout = timeout  # propagate the socket timeout to the file object
+            return f
+        else:
+            return wsgiserver.CP_fileobject(sock, mode, bufsize)
+
diff --git a/lib/nulib/python/nulib/files.py b/lib/nulib/python/nulib/files.py
new file mode 100644
index 0000000..1357944
--- /dev/null
+++ b/lib/nulib/python/nulib/files.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Gestion de fichiers de formats spécifiques.
+
+Pour le moment, seuls les fichiers textes sont supportés.
+"""
+
+__all__ = ('TextFile', )
+
+from os import path
+
+from .base import isstr, make_prop
+from .uio import _s
+from .lines import Lines
+
+_marker = []
+
+class TextFile(object):
+ """Un fichier texte, accéder et modifié comme un ensemble de lignes.
+
+ L'objet qui gère les lignes de ce fichier texte sont disponibles avec la
+ propriété lines
+
+ Lors de la lecture du contenu du fichier sur disque, on détermine la
+ convention de fin de lignes utilisée, et on la reproduit lors de l'écriture
+ sur disque.
+ """
+
+ _file, file = make_prop('_file')[:2]
+ _absfile, absfile = make_prop('_absfile')[:2]
+ _absfiledir, absfiledir = make_prop('_absfiledir')[:2]
+ _absfilename, absfilename = make_prop('_absfilename')[:2]
+ _inf, inf = make_prop('_inf')[:2]
+ _lines, lines = make_prop('_lines')[:2]
+
+ def __init__(self, file=None, raise_exception=True, readlines=True, lines=None):
+ super(TextFile, self).__init__()
+ if lines is not None: self._lines = lines
+ if file is not None: self.open(file, raise_exception, readlines)
+
+ def _new_Lines(self):
+ """Retourner une nouvelle instance de BLines pour la lecture des lignes
+ du fichier.
+
+ Par défaut, retourner une instance de Lines, permettant de lire des
+ lignes en unicode. Il faut surcharger cette méthode et retourner
+ BLines() si l'on veut lire des lignes non unicode. Une alternative est
+ de passer une instance de BLines() au constructeur.
+ """
+ return Lines()
+
+ def open(self, file, raise_exception=True, readlines=True, lines=None):
+ """Spécifier un nouveau fichier à charger, et charger ses lignes
+ """
+ # reset des valeurs
+ self._file = None
+ self._absfile = None
+ self._absfiledir = None
+ self._absfilename = None
+ self._inf = None
+ if lines is not None: self._lines = lines
+ if self._lines is None: self._lines = self._new_Lines()
+ # ouvrir le fichier
+ if isstr(file):
+ self._file = file
+ self._absfile = path.abspath(self._file)
+ self._absfiledir, self._absfilename = path.split(self._absfile)
+ else:
+ self._inf = file
+ if readlines: return self.readlines(raise_exception)
+ else: return None
+
+ def close(self):
+ if self._inf is not None:
+ self._inf.close()
+ self._inf = None
+
+ def readlines(self, raise_exception=True, close=True, uio=None):
+ """Lire les lignes du fichier.
+
+ @return: self.lines
+ """
+ if self._inf is None:
+ if self._absfile is None:
+ raise ValueError("Should open() the file before reading")
+ try:
+ self._inf = open(self._absfile, 'rb')
+ except IOError:
+ if raise_exception: raise
+ if self._inf is None :
+ self._lines.reset()
+ else:
+ try:
+ self._lines.readlines(self._inf, raise_exception=raise_exception, uio=uio)
+ finally:
+ if close: self.close()
+ return self._lines
+
+ def writelines(self, lines=None, outf=None, raise_exception=True, close=True, uio=None):
+ """Ecrire les lignes self.lines dans le fichier.
+
+ si lines!=None, il s'agit du nouvel ensemble de lignes.
+
+ @return: True si l'écriture a pu se faire, False sinon.
+ @rtype: bool
+ """
+ if lines is not None: self._lines[:] = lines
+ if outf is None:
+ if self._absfile is None: raise ValueError("outf is missing or open() the file before writing")
+ outf = self._absfile
+ if isstr(outf):
+ try:
+ outf = open(self._absfile, 'wb')
+ close = True
+ except IOError:
+ if raise_exception: raise
+ return False
+ try:
+ try:
+ self._lines.writelines(outf, uio=uio)
+ return True
+ except IOError:
+ if raise_exception: raise
+ return False
+ finally:
+ if close: outf.close()
+
+ def is_valid(self): return self._lines.is_valid()
+ valid = property(is_valid)
+
+ def get_nl(self): return self._lines.get_nl()
+ def set_nl(self, nl): self._lines.set_nl(nl)
+ nl = property(get_nl, set_nl)
+
+ def reset(self): self._lines.reset()
+ def grepi(self, pattern, indexes=None, inverse=False): return self._lines.grepi(pattern, indexes, inverse)
+ def grep(self, pattern, indexes=None, inverse=False): return self._lines.grep(pattern, indexes, inverse)
+ def replace(self, pattern, repl, count=0, indexes=None): return self._lines.replace(pattern, repl, count, indexes)
+ def map(self, func, copy=False, **args): return self._lines.map(func, copy, **args)
+ def filter(self, func, copy=False, **args): return self._lines.filter(func, copy, **args)
+ def join(self): return self._lines.join()
diff --git a/lib/nulib/python/nulib/flock.py b/lib/nulib/python/nulib/flock.py
new file mode 100644
index 0000000..07409e9
--- /dev/null
+++ b/lib/nulib/python/nulib/flock.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Gestion des verrous sur les fichiers.
+"""
+
+__all__ = ('cxopen', 'rdopen', 'wropen')
+
+import os
+from os import O_CREAT, O_EXCL, O_RDONLY, O_RDWR
+
+from .uio import _s
+
+def __check_mode(mode, *values, **kw):
+    # Ensure mode starts with one of the accepted prefixes; the keyword
+    # unless=prefix vetoes an otherwise-accepted match (e.g. accept 'r'
+    # but reject 'r+').
+    for value in values:
+        if mode.startswith(value):
+            unless = kw.get("unless", None)
+            if unless is None or not mode.startswith(unless):
+                return
+    expected = values[0:1] and values[0] or u"?"
+    raise ValueError("mode should start with '%s', got '%s'" % (expected, mode))
+
+def cxopen(file, mode="r+b", bufsize=-1, perms=0777):
+    u"""Atomically create a file and open it for reading/writing.
+
+    @raise OSError: if the file already exists.
+    """
+    __check_mode(mode, 'w', 'a', 'r+')
+    # O_CREAT|O_EXCL makes creation atomic: fails if the file exists.
+    # NOTE(review): bufsize is accepted but not forwarded to fdopen —
+    # confirm this is intentional.
+    fd = os.open(file, O_CREAT | O_EXCL | O_RDWR, perms)
+    return os.fdopen(fd, mode)
+
+if os.name == 'posix':
+    import fcntl
+    from fcntl import LOCK_SH, LOCK_EX, LOCK_NB
+
+    def rdopen(file, mode="rb", bufsize=-1, nowait=False):
+        u"""Open a file for reading, trying to acquire a shared lock for
+        its reading.
+
+        The lock is released when the file is closed.
+        """
+        __check_mode(mode, 'r', unless='r+')
+        fd = os.open(file, O_RDONLY)
+        # with nowait, LOCK_NB makes flock raise IOError instead of blocking
+        fcntl.flock(fd, LOCK_SH | (nowait and LOCK_NB or 0))
+        return os.fdopen(fd, mode, bufsize)
+
+    def wropen(file, mode="r+b", bufsize=-1, perms=0777, nowait=False):
+        u"""Open a file for reading/writing, trying to acquire an
+        exclusive lock for its writing.
+
+        The lock is released when the file is closed.
+        """
+        __check_mode(mode, 'w', 'a', 'r+')
+        fd = os.open(file, O_CREAT | O_RDWR, perms)
+        fcntl.flock(fd, LOCK_EX | (nowait and LOCK_NB or 0))
+        return os.fdopen(fd, mode, bufsize)
+
+else:
+    # no locking on unsupported platforms
+    def rdopen(file, mode="rb", bufsize=-1, nowait=False):
+        __check_mode(mode, 'r', unless='r+')
+        fd = os.open(file, O_RDONLY)
+        return os.fdopen(fd, mode, bufsize)
+
+    def wropen(file, mode="r+b", bufsize=-1, perms=0777, nowait=False):
+        __check_mode(mode, 'w', 'a', 'r+')
+        fd = os.open(file, O_CREAT | O_RDWR, perms)
+        return os.fdopen(fd, mode, bufsize)
diff --git a/lib/nulib/python/nulib/formats/__init__.py b/lib/nulib/python/nulib/formats/__init__.py
new file mode 100644
index 0000000..bb66ebb
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/__init__.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('UnicodeF', 'unicodeF',
+ 'LowerF', 'lowerF',
+ 'UpperF', 'upperF',
+ 'StripF', 'stripF',
+ 'StrF', 'strF',
+ 'BooleanF', 'booleanF',
+ 'IntegerF', 'integerF',
+ 'FloatF', 'floatF',
+ 'DateF', 'dateF',
+ 'TimeF', 'timeF',
+ 'WeekdayF', 'weekdayF',
+ 'JourF', 'jourF',
+ 'TelephoneF', 'telephoneF',
+ )
+
+from .strings import UnicodeF, LowerF, UpperF, StripF, StrF
+from .boolean import BooleanF
+from .integer import IntegerF
+from .float import FloatF
+from .datetime import DateF, TimeF
+from .weekday import WeekdayF
+from .jour import JourF
+from .telephone import TelephoneF
+
+# Shared singleton instances: format objects hold no per-call state, so a
+# single instance of each can safely be reused everywhere.
+unicodeF = UnicodeF()
+lowerF = LowerF()
+upperF = UpperF()
+stripF = StripF()
+strF = StrF()
+booleanF = BooleanF()
+integerF = IntegerF()
+floatF = FloatF()
+dateF = DateF()
+timeF = TimeF()
+weekdayF = WeekdayF()
+jourF = JourF()
+telephoneF = TelephoneF()
diff --git a/lib/nulib/python/nulib/formats/base.py b/lib/nulib/python/nulib/formats/base.py
new file mode 100644
index 0000000..d54731e
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/base.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('Format',)
+
+import re
+
+class Format(object):
+    """Base class for value formats: parse user text into an object and
+    format an object back into display text."""
+
+    RE_SPACES = re.compile(r'\s+')         # runs of whitespace
+    RE_NUMBERS = re.compile(r'\d+')        # any digit run
+    RE_ONLY_NUMBERS = re.compile(r'\d+$')  # digits only (match() anchors at start)
+
+    def parse(self, s):
+        u"""Parse the string s and return a specific object.
+
+        @return: an object
+        @raise ValueError: if the value is in an invalid format
+        """
+        raise NotImplementedError
+
+    def matches(self, s):
+        u"""Check whether s is in the proper format.
+
+        @rtype: bool
+        """
+        # delegate to parse(): valid iff it does not raise ValueError
+        try:
+            self.parse(s)
+            return True
+        except ValueError:
+            return False
+
+    def format(self, o):
+        u"""Format an object for display.
+
+        o is required to be an object in the proper format, as returned
+        by the parse() method.
+
+        @return: the display string
+        @rtype: basestring
+        """
+        raise NotImplementedError
diff --git a/lib/nulib/python/nulib/formats/boolean.py b/lib/nulib/python/nulib/formats/boolean.py
new file mode 100644
index 0000000..ae91aa1
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/boolean.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('BooleanF',)
+
+from ..uio import _s
+from ..input import is_yes, is_no
+
+from .strings import UnicodeF
+
+class BooleanF(UnicodeF):
+ def parse(self, s):
+ if s in (None, True, False): return s
+
+ s = UnicodeF.parse(self, s)
+ if is_yes(s): return True
+ elif is_no(s): return False
+ else: raise ValueError("Invalid boolean: %s" % s)
+
+ def format(self, b):
+ if b is None: return u""
+ elif b: return u"Oui"
+ else: return u"Non"
diff --git a/lib/nulib/python/nulib/formats/datetime.py b/lib/nulib/python/nulib/formats/datetime.py
new file mode 100644
index 0000000..5715d40
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/datetime.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'DateF', 'TimeF',
+ 'format_any', 'format_range',
+)
+
+from ..dates import Date, ensure_date, FR_DATEF, pydate, pydatetime
+from ..times import Time, ensure_time, HM_TIMEF, pytime
+
+from .base import Format
+
+class DateF(Format):
+ format_string = None
+
+ def __init__(self, format_string=None):
+ if format_string is None: format_string = FR_DATEF
+ self.format_string = format_string
+
+ def parse(self, s):
+ if s is None: return None
+ else: return ensure_date(s)
+
+ def format(self, d):
+ if d is None: return u""
+ else: return d.format(self.format_string)
+
+class TimeF(Format):
+ format_string = None
+
+ def __init__(self, format_string=None):
+ if format_string is None: format_string = HM_TIMEF
+ self.format_string = format_string
+
+ def parse(self, s):
+ if s is None: return None
+ else: return ensure_time(s)
+
+ def format(self, t):
+ if t is None: return u""
+ else: return t.format(self.format_string)
+
+def format_date(d):
+ if d is None: return u""
+ else: return ensure_date(d).format()
+def format_time(t):
+ if t is None: return u""
+ else: return ensure_time(t).format()
+def format_any(dt):
+ if dt is None: return u""
+ if isinstance(dt, pydatetime):
+ d = Date._new(dt)
+ t = Time._new(dt)
+ return u"%s %s" % (d.format(), t.format())
+ if isinstance(dt, pydate): dt = Date._new(dt)
+ elif isinstance(dt, pytime): dt = Time._new(dt)
+ if isinstance(dt, Date): return dt.format()
+ elif isinstance(dt, Time): return dt.format()
+ return ensure_date(dt).format()
+
+def format_range(dts, dte):
+ if dts is None and dte is None: return u""
+ elif dte is None: return u"%s -" % format_any(dts)
+ elif dts is None: return u"- %s" % format_any(dts)
+ ds, ts = format_date(dts), format_time(dts)
+ de, te = format_date(dte), format_time(dte)
+ if de == ds:
+ if te == ts: return u"%s %s" % (ds, ts)
+ else: return u"%s %s-%s" % (ds, ts, te)
+ else: return u"%s %s - %s %s" % (ds, ts, de, te)
diff --git a/lib/nulib/python/nulib/formats/float.py b/lib/nulib/python/nulib/formats/float.py
new file mode 100644
index 0000000..d6beb43
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/float.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('FloatF',)
+
+import re
+
+from ..base import isflt
+from ..uio import _s, _u
+
+from .strings import UnicodeF
+
+class FloatF(UnicodeF):
+ RE_FLOAT = re.compile(r'(?:-)?\d+(?:\.\d+)?$')
+
+ def parse(self, s):
+ if isflt(s): return s
+
+ s = UnicodeF.parse(self, s)
+ if s is None: return None
+
+ if self.RE_FLOAT.match(s) is None:
+ raise ValueError("Invalid number: %s" % s)
+
+ return float(s)
+
+ def format(self, f):
+ if f is None: return u""
+ else: return _u(f)
diff --git a/lib/nulib/python/nulib/formats/integer.py b/lib/nulib/python/nulib/formats/integer.py
new file mode 100644
index 0000000..48bb05b
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/integer.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('IntegerF',)
+
+import re
+
+from ..base import isnum
+from ..uio import _s, _u
+
+from .strings import UnicodeF
+
+class IntegerF(UnicodeF):
+ RE_INTEGER = re.compile(r'(?:-)?\d+$')
+
+ def parse(self, s):
+ if isnum(s): return s
+
+ s = UnicodeF.parse(self, s)
+ if s is None: return None
+
+ if self.RE_INTEGER.match(s) is None:
+ raise ValueError("Invalid number: %s" % _s(s))
+
+ return int(s)
+
+ def format(self, i):
+ if i is None: return u""
+ else: return _u(i)
diff --git a/lib/nulib/python/nulib/formats/jour.py b/lib/nulib/python/nulib/formats/jour.py
new file mode 100644
index 0000000..7041de4
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/jour.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('JourF',)
+
+from ..uio import _s
+
+from .strings import UnicodeF
+
+class JourF(UnicodeF):
+    """Format for French day-of-week names, cron-style numbering
+    (1=lundi .. 6=samedi, 0 or 7=dimanche)."""
+
+    LUNDI = u"lundi"
+    MARDI = u"mardi"
+    MERCREDI = u"mercredi"
+    JEUDI = u"jeudi"
+    VENDREDI = u"vendredi"
+    SAMEDI = u"samedi"
+    DIMANCHE = u"dimanche"
+
+    # canonical capitalized day names, Monday first
+    JOURS = [j.capitalize() for j in (LUNDI, MARDI, MERCREDI, JEUDI, VENDREDI, SAMEDI, DIMANCHE)]
+    # accepted aliases: digit (cron-style), single letter, 3-letter abbrev
+    JOUR_MAP = {u"1": LUNDI, u"l": LUNDI, u"lun": LUNDI,
+                u"2": MARDI, u"ma": MARDI, u"mar": MARDI,
+                u"3": MERCREDI, u"me": MERCREDI, u"mer": MERCREDI,
+                u"4": JEUDI, u"j": JEUDI, u"jeu": JEUDI,
+                u"5": VENDREDI, u"v": VENDREDI, u"ven": VENDREDI,
+                u"6": SAMEDI, u"s": SAMEDI, u"sam": SAMEDI,
+                u"0": DIMANCHE, u"7": DIMANCHE, u"d": DIMANCHE, u"dim": DIMANCHE,
+                }
+
+    def parse(self, s):
+        s = UnicodeF.parse(self, s)
+        if s is None: return None
+
+        # strip whitespace, resolve aliases, then normalize capitalization
+        j = self.RE_SPACES.sub(u'', s)
+        j = self.JOUR_MAP.get(j.lower(), j).capitalize()
+        if j in self.JOURS: return j
+        else: raise ValueError("Invalid day name: %s" % s)
+
+    def format(self, j):
+        if j is None: return u""
+        else: return j
+
+    def get_dow(self, j):
+        # 0-based index in JOURS (Lundi=0 .. Dimanche=6);
+        # raises ValueError if j is not a canonical day name
+        return self.JOURS.index(j)
diff --git a/lib/nulib/python/nulib/formats/strings.py b/lib/nulib/python/nulib/formats/strings.py
new file mode 100644
index 0000000..0ac4e3c
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/strings.py
@@ -0,0 +1,102 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('UnicodeF',
+ 'LowerF', 'UpperF', 'StripF',
+ 'StrF',
+ )
+
+import re
+
+from ..uio import _u, _s
+
+from .base import Format
+
+class UnicodeF(Format):
+    """Base unicode format; also provides accent folding for Latin-1
+    French accented characters."""
+
+    # (regex, replacement) pairs folding each accented letter family to
+    # its unaccented equivalent, lowercase and uppercase variants
+    RE_a = (re.compile(ur'[äàâ]'), u'a'); RE_A = (re.compile(ur'[ÄÀÂ]'), u'A')
+    RE_e = (re.compile(ur'[ëéèê]'), u'e'); RE_E = (re.compile(ur'[ËÉÈÊ]'), u'E')
+    RE_i = (re.compile(ur'[ïî]'), u'i'); RE_I = (re.compile(ur'[ÏÎ]'), u'I')
+    RE_o = (re.compile(ur'[öô]'), u'o'); RE_O = (re.compile(ur'[ÖÔ]'), u'O')
+    RE_u = (re.compile(ur'[üûù]'), u'u'); RE_U = (re.compile(ur'[ÜÛÙ]'), u'U')
+    RE_y = (re.compile(ur'[ÿŷ]'), u'y'); RE_Y = (re.compile(ur'[ŸŶ]'), u'Y')
+    RE_c = (re.compile(ur'[ç]'), u'c'); RE_C = (re.compile(ur'[Ç]'), u'C')
+
+    def strip_accents(cls, s):
+        # apply each folding regex in turn; s must be unicode
+        for re_x, x in (cls.RE_A, cls.RE_a,
+                        cls.RE_E, cls.RE_e,
+                        cls.RE_I, cls.RE_i,
+                        cls.RE_O, cls.RE_o,
+                        cls.RE_U, cls.RE_u,
+                        cls.RE_Y, cls.RE_y,
+                        cls.RE_C, cls.RE_c,
+                        ):
+            s = re_x.sub(x, s)
+        return s
+    strip_accents = classmethod(strip_accents)
+
+    def parse(self, s):
+        u"""Return the string as unicode, or None if s is None.
+
+        @return: a string or None
+        @rtype: unicode
+        """
+        if s is None: return None
+        else: return _u(s)
+
+    def format(self, o):
+        u"""Return an empty string if o is None, o otherwise.
+
+        @rtype: unicode
+        """
+        if o is None: return u""
+        else: return o
+
+class LowerF(UnicodeF):
+ def parse(self, s):
+ u"""Retourner la chaine s en minuscule, ou None si s==None
+
+ @return: une chaine ou None
+ @rtype: unicode
+ """
+ s = UnicodeF.parse(self, s)
+ if s is None: return None
+ else: return s.lower()
+
+class UpperF(UnicodeF):
+ def parse(self, s):
+ u"""Retourner la chaine s en majuscule, ou None si s==None
+
+ @return: une chaine ou None
+ @rtype: unicode
+ """
+ s = UnicodeF.parse(self, s)
+ if s is None: return None
+ else: return s.upper()
+
+class StripF(UnicodeF):
+ def parse(self, s):
+ u"""Retourner la chaine s strippée, ou None si s==None
+
+ @return: une chaine ou None
+ @rtype: unicode
+ """
+ s = UnicodeF.parse(self, s)
+ if s is None: return None
+ else: return s.strip()
+
+class StrF(Format):
+ def parse(self, s):
+ u"""Retourner la chaine en str, ou None si s==None
+
+ @return: une chaine ou None
+ @rtype: str
+ """
+ if s is None: return None
+ else: return _s(s)
+
+ def format(self, o):
+ u"""Retourner une chaine vide si o==None, ou o dans le cas contraire.
+
+ @rtype: str
+ """
+ if o is None: return ""
+ else: return o
diff --git a/lib/nulib/python/nulib/formats/telephone.py b/lib/nulib/python/nulib/formats/telephone.py
new file mode 100644
index 0000000..6555fe5
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/telephone.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('TelephoneF',)
+
+import re
+
+from ..uio import _s
+
+from .strings import UnicodeF
+
+class TelephoneF(UnicodeF):
+    # NOTE(review): the 00262/0262 constants suggest numbers are
+    # normalized for La Réunion (country code +262) — confirm with callers.
+    RE_PREFIX = re.compile(r'^00262')
+
+    def parse(self, s):
+        s = UnicodeF.parse(self, s)
+        if s is None: return None
+
+        t = self.RE_SPACES.sub(u'', s)  # drop all whitespace
+        t = t.replace(u"+", "00")       # +262… -> 00262…
+        t = self.RE_PREFIX.sub(u"0", t) # 00262… -> 0…
+        mo = self.RE_ONLY_NUMBERS.match(t)
+        if mo is None:
+            raise ValueError("Invalid telephone: %s" % s)
+
+        # 6-digit local numbers get the 0262 area prefix prepended
+        if len(t) == 6: t = u'0262' + t
+        return t
+
+    def format(self, t):
+        if t is None: return u""
+        elif len(t) == 10:
+            # standard layout: 0262 xx xx xx
+            return u"%s %s %s %s" % (t[0:4], t[4:6], t[6:8], t[8:10])
+        else:
+            # fallback for other lengths: group digit pairs from the right
+            parts = []
+            while len(t) > 3:
+                parts.insert(0, t[-2:])
+                t = t[:-2]
+            parts.insert(0, t)
+            return u" ".join(parts)
diff --git a/lib/nulib/python/nulib/formats/weekday.py b/lib/nulib/python/nulib/formats/weekday.py
new file mode 100644
index 0000000..1d00c27
--- /dev/null
+++ b/lib/nulib/python/nulib/formats/weekday.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('WeekdayF',)
+
+from ..base import isnum
+from ..uio import _s
+from ..dates import isanydate, ensure_date
+
+from .strings import UnicodeF
+
+class WeekdayF(UnicodeF):
+    """Format for French weekday names, numbered like Python's
+    date.weekday(): 0=lundi .. 6=dimanche.
+
+    NOTE: this numbering deliberately differs from JourF's cron-style map
+    (where 1=lundi and 0/7=dimanche).
+    """
+
+    LUNDI = u"lundi"
+    MARDI = u"mardi"
+    MERCREDI = u"mercredi"
+    JEUDI = u"jeudi"
+    VENDREDI = u"vendredi"
+    SAMEDI = u"samedi"
+    DIMANCHE = u"dimanche"
+
+    # canonical capitalized day names, Monday first (index == weekday())
+    JOURS = [j.capitalize() for j in (LUNDI, MARDI, MERCREDI, JEUDI, VENDREDI, SAMEDI, DIMANCHE)]
+    # accepted aliases: digit (weekday()-style), single letter, 3-letter abbrev
+    JOUR_MAP = {u"0": LUNDI, u"l": LUNDI, u"lun": LUNDI,
+                u"1": MARDI, u"ma": MARDI, u"mar": MARDI,
+                u"2": MERCREDI, u"me": MERCREDI, u"mer": MERCREDI,
+                u"3": JEUDI, u"j": JEUDI, u"jeu": JEUDI,
+                u"4": VENDREDI, u"v": VENDREDI, u"ven": VENDREDI,
+                u"5": SAMEDI, u"s": SAMEDI, u"sam": SAMEDI,
+                u"6": DIMANCHE, u"d": DIMANCHE, u"dim": DIMANCHE,
+                }
+
+    def __parse(self, wd):
+        # normalize: strip whitespace, resolve aliases, capitalize
+        wd = UnicodeF.parse(self, wd)
+        j = self.RE_SPACES.sub(u'', wd)
+        j = self.JOUR_MAP.get(j.lower(), j).capitalize()
+        return j
+
+    def __invalid(self, wd):
+        # build (not raise) the ValueError for an unrecognized day
+        return ValueError(_s(u"Jour incorrect: %s" % repr(wd)))
+
+    def get_weekday(self, wd):
+        # Return the weekday() number (0=lundi) for a number, date or name.
+        if wd is None: return None
+        elif isnum(wd): return wd % 7
+        elif isanydate(wd): return ensure_date(wd).weekday()
+        j = self.__parse(wd)
+        try: return self.JOURS.index(j)
+        except ValueError: raise self.__invalid(wd)
+
+    def parse(self, wd):
+        # Return the canonical capitalized name for a number, date or name.
+        if wd is None: return None
+        elif isnum(wd): return self.JOURS[wd % 7]
+        elif isanydate(wd): return self.JOURS[ensure_date(wd).weekday()]
+        j = self.__parse(wd)
+        if j in self.JOURS: return j
+        else: raise self.__invalid(wd)
+
+    def format(self, j):
+        if j is None: return u""
+        else: return j
diff --git a/lib/nulib/python/nulib/functions.py b/lib/nulib/python/nulib/functions.py
new file mode 100644
index 0000000..4dd181e
--- /dev/null
+++ b/lib/nulib/python/nulib/functions.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Des fonctions pour travailler avec les fonctions.
+"""
+
+__all__ = ('apply_args', 'unpack_results')
+
+from types import MethodType, FunctionType
+
+from .uio import _s
+from .base import isseq
+
+def apply_args(func, *args, **kw):
+    u"""Apply the arguments args to the function func, taking its declared
+    argument count into account and ignoring any extra arguments.
+
+    For example, if f is defined as:
+
+        def f(a, b):
+            pass
+
+    Then:
+
+        apply_args(f, 1, 2, 3)
+
+    Is equivalent to:
+
+        apply(f, (1, 2))
+
+    Keyword arguments are forwarded unfiltered.
+    """
+    f = func
+    ac_offset = 0
+    # Python 2 introspection: unwrap bound methods and skip 'self'
+    if type(f) is MethodType:
+        f = f.im_func
+        ac_offset = 1
+    if type(f) is not FunctionType:
+        raise ValueError("func must be a function")
+    argcount = f.func_code.co_argcount - ac_offset
+    # truncate the positional arguments to what func accepts
+    args = args[:argcount]
+    return func(*args, **kw)
+
+def unpack_results(results, *defaults):
+ u"""results étant une valeur scalaire ou une séquence, retourner une
+ séquence ayant le même nombre de valeurs de defaults, et en fournissant
+ les valeurs de defaults si elles ne sont pas présentes dans results.
+
+ >>> unpack_results((1,2), "a", "b", "c")
+ (1, 2, 'c')
+ >>> unpack_results("x", "a", "b", "c")
+ ('x', 'b', 'c')
+ """
+ if not isseq(results): results = (results,)
+ expected = len(defaults)
+ got = len(results)
+ head = tuple(results[:expected])
+ if got < expected: tail = tuple(defaults[-(expected - got):])
+ else: tail = ()
+ return head + tail
diff --git a/lib/nulib/python/nulib/htmlentities.py b/lib/nulib/python/nulib/htmlentities.py
new file mode 100644
index 0000000..fc13d20
--- /dev/null
+++ b/lib/nulib/python/nulib/htmlentities.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Des fonctions pour gérer les entités html.
+"""
+
+__all__ = ('quote_entity', 'unquote_entity',
+ 'quote_html', 'unquote_html',
+ 'quote_but_html', 'unquote_but_html',
+ 'quote_attr', 'unquote_attr',
+ )
+
+from types import UnicodeType
+
+from .uio import _u, _s
+
+name2codepoint = {
+ 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
+ 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1
+ 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1
+ 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1
+ 'Alpha': 0x0391, # greek capital letter alpha, U+0391
+ 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1
+ 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1
+ 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1
+ 'Beta': 0x0392, # greek capital letter beta, U+0392
+ 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1
+ 'Chi': 0x03a7, # greek capital letter chi, U+03A7
+ 'Dagger': 0x2021, # double dagger, U+2021 ISOpub
+ 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3
+ 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1
+ 'Eacute': 0x00c9, # latin capital letter E with acute, U+00C9 ISOlat1
+ 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1
+ 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1
+ 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395
+ 'Eta': 0x0397, # greek capital letter eta, U+0397
+ 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1
+ 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3
+ 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1
+ 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1
+ 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1
+ 'Iota': 0x0399, # greek capital letter iota, U+0399
+ 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1
+ 'Kappa': 0x039a, # greek capital letter kappa, U+039A
+ 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3
+ 'Mu': 0x039c, # greek capital letter mu, U+039C
+ 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1
+ 'Nu': 0x039d, # greek capital letter nu, U+039D
+ 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2
+ 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1
+ 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1
+ 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1
+ 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3
+ 'Omicron': 0x039f, # greek capital letter omicron, U+039F
+ 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1
+ 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1
+ 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1
+ 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3
+ 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3
+ 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech
+ 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3
+ 'Rho': 0x03a1, # greek capital letter rho, U+03A1
+ 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2
+ 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3
+ 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1
+ 'Tau': 0x03a4, # greek capital letter tau, U+03A4
+ 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3
+ 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1
+ 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1
+ 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1
+ 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3
+ 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1
+ 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3
+ 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1
+ 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2
+ 'Zeta': 0x0396, # greek capital letter zeta, U+0396
+ 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1
+ 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1
+ 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia
+ 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1
+ 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1
+ 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW
+ 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3
+ 'amp': 0x0026, # ampersand, U+0026 ISOnum
+ 'and': 0x2227, # logical and = wedge, U+2227 ISOtech
+ 'ang': 0x2220, # angle, U+2220 ISOamso
+ 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1
+ 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr
+ 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1
+ 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1
+ 'bdquo': 0x201e, # double low-9 quotation mark, U+201E NEW
+ 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3
+ 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum
+ 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub
+ 'cap': 0x2229, # intersection = cap, U+2229 ISOtech
+ 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1
+ 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia
+ 'cent': 0x00a2, # cent sign, U+00A2 ISOnum
+ 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3
+ 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub
+ 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub
+ 'cong': 0x2245, # approximately equal to, U+2245 ISOtech
+ 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum
+ 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW
+ 'cup': 0x222a, # union = cup, U+222A ISOtech
+ 'curren': 0x00a4, # currency sign, U+00A4 ISOnum
+ 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa
+ 'dagger': 0x2020, # dagger, U+2020 ISOpub
+ 'darr': 0x2193, # downwards arrow, U+2193 ISOnum
+ 'deg': 0x00b0, # degree sign, U+00B0 ISOnum
+ 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3
+ 'diams': 0x2666, # black diamond suit, U+2666 ISOpub
+ 'divide': 0x00f7, # division sign, U+00F7 ISOnum
+ 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1
+ 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1
+ 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1
+ 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso
+ 'emsp': 0x2003, # em space, U+2003 ISOpub
+ 'ensp': 0x2002, # en space, U+2002 ISOpub
+ 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3
+ 'equiv': 0x2261, # identical to, U+2261 ISOtech
+ 'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3
+ 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1
+ 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1
+ 'euro': 0x20ac, # euro sign, U+20AC NEW
+ 'exist': 0x2203, # there exists, U+2203 ISOtech
+ 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech
+ 'forall': 0x2200, # for all, U+2200 ISOtech
+ 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum
+ 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum
+ 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum
+ 'frasl': 0x2044, # fraction slash, U+2044 NEW
+ 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3
+ 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech
+ 'gt': 0x003e, # greater-than sign, U+003E ISOnum
+ 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa
+ 'harr': 0x2194, # left right arrow, U+2194 ISOamsa
+ 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub
+ 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub
+ 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1
+ 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1
+ 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum
+ 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1
+ 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso
+ 'infin': 0x221e, # infinity, U+221E ISOtech
+ 'int': 0x222b, # integral, U+222B ISOtech
+ 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3
+ 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum
+ 'isin': 0x2208, # element of, U+2208 ISOtech
+ 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1
+ 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3
+ 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech
+ 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3
+ 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech
+ 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum
+ 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum
+ 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc
+ 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum
+ 'le': 0x2264, # less-than or equal to, U+2264 ISOtech
+ 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc
+ 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech
+ 'loz': 0x25ca, # lozenge, U+25CA ISOpub
+ 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070
+ 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed
+ 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum
+ 'lt': 0x003c, # less-than sign, U+003C ISOnum
+ 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia
+ 'mdash': 0x2014, # em dash, U+2014 ISOpub
+ 'micro': 0x00b5, # micro sign, U+00B5 ISOnum
+ 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum
+ 'minus': 0x2212, # minus sign, U+2212 ISOtech
+ 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3
+ 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech
+ 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum
+ 'ndash': 0x2013, # en dash, U+2013 ISOpub
+ 'ne': 0x2260, # not equal to, U+2260 ISOtech
+ 'ni': 0x220b, # contains as member, U+220B ISOtech
+ 'not': 0x00ac, # not sign, U+00AC ISOnum
+ 'notin': 0x2209, # not an element of, U+2209 ISOtech
+ 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn
+ 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1
+ 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3
+ 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1
+ 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1
+ 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2
+ 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1
+ 'oline': 0x203e, # overline = spacing overscore, U+203E NEW
+ 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3
+ 'omicron': 0x03bf, # greek small letter omicron, U+03BF NEW
+ 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb
+ 'or': 0x2228, # logical or = vee, U+2228 ISOtech
+ 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum
+ 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum
+ 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1
+ 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1
+ 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb
+ 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1
+ 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum
+ 'part': 0x2202, # partial differential, U+2202 ISOtech
+ 'permil': 0x2030, # per mille sign, U+2030 ISOtech
+ 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech
+ 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3
+ 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3
+ 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3
+ 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum
+ 'pound': 0x00a3, # pound sign, U+00A3 ISOnum
+ 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech
+ 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb
+ 'prop': 0x221d, # proportional to, U+221D ISOtech
+ 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3
+ 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum
+ 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech
+ 'radic': 0x221a, # square root = radical sign, U+221A ISOtech
+ 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech
+ 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum
+ 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum
+ 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc
+ 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum
+ 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso
+ 'reg': 0x00ae, # registered sign = registered trade mark sign, U+00AE ISOnum
+ 'rfloor': 0x230b, # right floor, U+230B ISOamsc
+ 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3
+ 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070
+ 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed
+ 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum
+ 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW
+ 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2
+ 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb
+ 'sect': 0x00a7, # section sign, U+00A7 ISOnum
+ 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum
+ 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3
+ 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3
+ 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech
+ 'spades': 0x2660, # black spade suit, U+2660 ISOpub
+ 'sub': 0x2282, # subset of, U+2282 ISOtech
+ 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech
+ 'sum': 0x2211, # n-ary sumation, U+2211 ISOamsb
+ 'sup': 0x2283, # superset of, U+2283 ISOtech
+ 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum
+ 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum
+ 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum
+ 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech
+ 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1
+ 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3
+ 'there4': 0x2234, # therefore, U+2234 ISOtech
+ 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3
+ 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW
+ 'thinsp': 0x2009, # thin space, U+2009 ISOpub
+ 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1
+ 'tilde': 0x02dc, # small tilde, U+02DC ISOdia
+ 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum
+ 'trade': 0x2122, # trade mark sign, U+2122 ISOnum
+ 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa
+ 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1
+ 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum
+ 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1
+ 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1
+ 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia
+ 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW
+ 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3
+ 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1
+ 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso
+ 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3
+ 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1
+ 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum
+ 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1
+ 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3
+ 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070
+ 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070
+}
+
+codepoint2name = {}
+entitydefs = {}
+
+for name in name2codepoint.keys():
+ codepoint = name2codepoint[name]
+ codepoint2name[codepoint] = name
+ if codepoint <= 0xff: entitydefs[name] = unichr(codepoint)
+ else: entitydefs[name] = u'%d;' % codepoint
+
+UNQUOTE_ENTITY_NAMES = QUOTE_ENTITY_NAMES = tuple(filter(
+ lambda n: n not in ('amp',), name2codepoint.keys()))
+UNQUOTE_HTML_NAMES = QUOTE_HTML_NAMES = ('lt', 'gt')
+UNQUOTE_BUT_HTML_NAMES = QUOTE_BUT_HTML_NAMES = tuple(filter(
+ lambda n: n not in ('amp', 'lt', 'gt', 'quot'), name2codepoint.keys()))
+UNQUOTE_ATTR_NAMES = QUOTE_ATTR_NAMES = ('lt', 'gt', 'quot')
+
+UNQUOTE_ENTITY_NAMES = UNQUOTE_ENTITY_NAMES + ('amp',)
+QUOTE_ENTITY_NAMES = ('amp',) + QUOTE_ENTITY_NAMES
+
+UNQUOTE_HTML_NAMES = UNQUOTE_HTML_NAMES + ('amp',)
+QUOTE_HTML_NAMES = ('amp',) + QUOTE_HTML_NAMES
+
+UNQUOTE_ATTR_NAMES = UNQUOTE_ATTR_NAMES + ('amp',)
+QUOTE_ATTR_NAMES = ('amp',) + QUOTE_ATTR_NAMES
+
+def __quote(utext, encoding=None, names=QUOTE_HTML_NAMES):
+    # Replace each character whose entity name is listed in names by its
+    # '&name;' entity.  Byte-string input is decoded with encoding first and
+    # the result re-encoded, so the output type matches the input type.
+    # The order of names matters: 'amp' is placed first in the QUOTE_*
+    # tuples so '&' is escaped before other replacements introduce new '&'.
+    recode = False
+    if type(utext) is not UnicodeType:
+        utext = _u(utext, encoding)
+        recode = True
+    for name in names:
+        char = unichr(name2codepoint[name])
+        if utext.find(char) != - 1:
+            utext = utext.replace(char, u'&%s;' % name)
+    if recode: utext = _s(utext, encoding)
+    return utext
+
+def __unquote(utext, encoding=None, names=UNQUOTE_HTML_NAMES):
+    # Replace each '&name;' entity listed in names by its character.
+    # Byte-string input is decoded with encoding first and the result
+    # re-encoded, so the output type matches the input type.
+    # The order of names matters: 'amp' is placed last in the UNQUOTE_*
+    # tuples so that u'&amp;lt;' unquotes to u'&lt;' rather than u'<'.
+    recode = False
+    if type(utext) is not UnicodeType:
+        utext = _u(utext, encoding)
+        recode = True
+    for name in names:
+        entity = u'&%s;' % name
+        if utext.find(entity) != - 1:
+            utext = utext.replace(entity, unichr(name2codepoint[name]))
+    if recode: utext = _s(utext, encoding)
+    return utext
+
+def quote_entity(utext, encoding=None):
+    # Escape every known entity character, '&' included.
+    return __quote(utext, encoding, QUOTE_ENTITY_NAMES)
+
+def quote_html(utext, encoding=None):
+    # Escape only the HTML-significant characters: '&', '<', '>'.
+    return __quote(utext, encoding, QUOTE_HTML_NAMES)
+
+def quote_but_html(utext, encoding=None):
+    # Escape every known entity character EXCEPT '&', '<', '>', '"',
+    # leaving existing markup intact.
+    return __quote(utext, encoding, QUOTE_BUT_HTML_NAMES)
+
+def quote_attr(utext, encoding=None):
+    # Escape '&', '<', '>', '"' for use inside an HTML attribute value.
+    return __quote(utext, encoding, QUOTE_ATTR_NAMES)
+
+def unquote_entity(utext, encoding=None):
+    # Inverse of quote_entity: resolve every known entity, '&amp;' last.
+    return __unquote(utext, encoding, UNQUOTE_ENTITY_NAMES)
+
+def unquote_html(utext, encoding=None):
+    # Inverse of quote_html: resolve '&lt;', '&gt;', then '&amp;'.
+    return __unquote(utext, encoding, UNQUOTE_HTML_NAMES)
+
+def unquote_but_html(utext, encoding=None):
+    # Inverse of quote_but_html: resolve everything but the HTML entities.
+    return __unquote(utext, encoding, UNQUOTE_BUT_HTML_NAMES)
+
+def unquote_attr(utext, encoding=None):
+    # Inverse of quote_attr.
+    return __unquote(utext, encoding, UNQUOTE_ATTR_NAMES)
+
+if __name__ == '__main__':
+ print u"""
+
+
+Liste des entités
+
+
+
Liste des entités qui sont pris en charge par ce module
+
+
name
char (plain)
char (entity)
""".encode("utf-8")
+ for name, entity in entitydefs.items():
+ print (u"
""" % (active, url, css, accesskey, title)]
+
+ def __dropdown(self, menu, profile):
+ children = []
+ for mitem in menu.mitems:
+ children.extend(self.__mitem(mitem, profile))
+ if not children:
+ # ne pas afficher un menu vide
+ return []
+ active = ' active' if menu.active else ''
+ lines = [u"""
""")
+ return lines
+
+ def __call__(self, s=None, p=None, t=None):
+ if t is None: t = self.navbar_type
+ if s is not None: self.select(s, p)
+ selection = self.get_mitem()
+ lines = []
+ if t == 'fixed' or t == 'fixed-top':
+ lines.append(u"""""")
+ css = u''
+ css = ui.addclassif('navbar-fixed-top', t == 'fixed' or t == 'fixed-top', css)
+ css = ui.addclassif('navbar-static-top', t == 'static' or t == 'static-top', css)
+ if self.profiles is not None and self.sel_profile is not None:
+ css = ui.addclassif('%s-profile' % self.sel_profile, None, css)
+ css = ui.addclassif(self.css, None, css)
+ lines.append(u"""
""" % css)
+ lines.append(u"""
+
+
+
+
+
""")
+ if self.profiles is not None:
+ # gestion des profils
+ lines.append(u"""""")
+ for mitem in self.mitems:
+ if isinstance(mitem, ui.Menu):
+ lines.extend(self.__dropdown(mitem, self.sel_profile))
+ else:
+ lines.extend(self.__mitem(mitem, self.sel_profile))
+ lines.append(u"""
""")
+ title = self.title
+ if title is not None:
+ title = web.websafe(title)
+ lines.append(u"""
%s
""" % title)
+ lines.append(u"""
+
+
""")
+ return u"\n".join(lines)
+
+ def __unicode__(self): return self()
+
+def set_menu(menu):
+    """Decorator that installs the given menu instance into the session
+    before invoking the wrapped page method.
+    """
+    def decorator(method):
+        def wrapper(self, *args, **kw):
+            # Make sure a session exists, then store the menu in it.
+            session = _ensure_session(self)
+            session.menu = menu
+            return method(self, *args, **kw)
+        return wrapper
+    return decorator
+
+def menu(id=None, p=None):
+    """Decorator that updates the current selection (id) and profile (p) of
+    the menu stored in the session.
+
+    Requires that a menu was previously installed (see set_menu); raises
+    ValueError otherwise.  With id=None the menu is left untouched.
+    """
+    def decorator(method):
+        def wrapper(self, *args, **kw):
+            session = _ensure_session(self)
+            if 'menu' not in session:
+                raise ValueError("menu is required")
+            if id is not None:
+                session.menu.select(id, p)
+            return method(self, *args, **kw)
+        return wrapper
+    return decorator
+
+################################################################################
+# about
+
+def About(summary, details=None, onShowDetails=None, js=True):
+ lines = []
+ if js:
+ lines.append(u"""""")
+ if callable(summary): summary = summary()
+ lines.extend([
+ u'
',
+ u'
',
+ summary,
+ ])
+ if details is not None:
+ lines.extend([u'En savoir plus...'])
+ lines.extend([u'
'])
+ if details is not None:
+ if callable(details): details = details()
+ lines.extend([
+ u'
',
+ details,
+ u'
',
+ ])
+ lines.extend([u'
'])
+ return u'\n'.join([u(line) for line in lines])
+
+################################################################################
+# Alert
+
+ALERT_TYPE_MAP = dict(
+ done="success",
+ warn="warning", notice="warning",
+ error="danger",
+)
+class Alert(object):
+ msg = None
+ exc_info = None
+ type = None
+ closeable = None
+ escape = None
+ action = None
+ showtb = None
+
+    # Parameters (shared by __init__ and __call__): msg = message text or a
+    # callable producing it; e = sys.exc_info() triple; t = alert type key
+    # (mapped through ALERT_TYPE_MAP); c = closeable flag; x = escape flag;
+    # action = associated action; showtb = include traceback flag.
+    def __init__(self, msg=None, e=Undef, t="error", c=False, x=None, action=None, showtb=True):
+        # Delegate to __call__, which performs all the normalisation.
+        self(msg, e, t, c, x, action, showtb)
+
+    def __call__(self, msg=Undef, e=Undef, t=Undef, c=Undef, x=Undef, action=Undef, showtb=Undef):
+        # Update the alert in place; Undef means "keep the current value".
+        # Returns self so calls can be chained or used as expressions.
+        if msg is Undef:
+            # No message given: keep the current message (and exception).
+            msg = self.msg
+            if e is Undef: e = self.exc_info
+        else:
+            # A message was given: also capture the current exception, if any.
+            if e is Undef: e = sys.exc_info()
+            if e == (None, None, None): e = None
+        if t is Undef: t = self.type
+        if c is Undef: c = self.closeable
+        if x is Undef: x = self.escape
+        if action is Undef: action = self.action
+        if showtb is Undef: showtb = self.showtb
+
+        self.msg = msg
+        self.exc_info = e
+        # Normalise the friendly type keys (done/warn/notice/error) to the
+        # bootstrap css suffixes; unknown keys pass through unchanged.
+        self.type = ALERT_TYPE_MAP.get(t, t)
+        self.closeable = c
+        self.escape = x
+        self.action = action
+        self.showtb = showtb
+        return self
+
+ def render(self, msg=Undef, e=Undef, t=Undef, c=Undef, x=Undef, action=Undef, showtb=Undef):
+ if msg is Undef:
+ # si on ne spécifie pas de message, alors prendre la valeur initiale
+ msg = self.msg
+ if e is Undef: e = self.exc_info
+ else:
+ # si on spécifie un message, alors prendre aussi l'exception courante
+ if e is Undef: e = sys.exc_info()
+ if t is Undef: t = self.type
+ if c is Undef: c = self.closeable
+ if x is Undef: x = self.escape
+ if action is Undef: action = self.action
+ if showtb is Undef: showtb = self.showtb
+
+ if callable(msg):
+ # si msg est callable, par défaut ne pas mettre le résultat en
+ # échappement
+ if x is None: x = False
+ msg = msg()
+ if x is None: x = True
+
+ if msg is None and e is not None:
+ msg = u"Une erreur inattendue s'est produite"
+ if msg is None: return u""
+ if x: msg = web.websafe(msg)
+
+ lines = []
+ css = u"alert alert-%s" % t
+ css = ui.addclassif("alert-dismissible", c, css)
+ lines.append(u"""
""" % css)
+ if c:
+ lines.append(u"""""")
+ lines.append(p(msg))
+ if action is not None:
+ if callable(action): action = action()
+ if isinstance(action, Action):
+ url = web.websafe(action.url)
+ title = web.websafe(action.title or u"Cliquez ici pour continuer")
+ action = u"""
""" % (url, title)
+ lines.append(p(action))
+ if e is not None:
+ lines.append(u"""
Pour information, le message d'erreur technique est """)
+ lines.extend(traceback.format_exception_only(*e[:2]))
+ if showtb:
+ lines.append(u"")
+ lines.append(u"""
""")
+ lines.append(u"""
""")
+ return u"\n".join([u(line) for line in lines])
+ def __unicode__(self): return self.render()
+
+def set_alert(template=None, action=Undef, delay=None):
+ """Décorateur qui permet de gérer automatiquement une instance de Alert dans la
+ page.
+
+ * Une instance vide self.alert = Alert() est créée dans la page
+ * Si la valeur de retour n'est pas une instance de Alert, aucun traitement
+ particulier supplémentaire n'est effectué
+ * Si une exception se produit et qu'on n'est pas en mode debug, créer une
+ nouvelle instance de Alert avec le message de l'exception.
+ * Sinon:
+ * Si template==None, afficher une page d'erreur générique avec une alerte
+ de type Error.
+ * Sinon, mettre à jour la variable alert avec la valeur retournée et
+ effectuer le rendu du template spécifié.
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ self.alert = Alert(action=action)
+ try:
+ alert = method(self, *args, **kw)
+ except:
+ if web.config.debug: raise
+ else: alert = Alert("Une erreur inattendue s'est produite")
+ if isinstance(alert, Alert):
+ if alert.type == 'success':
+ title = u"Succès"
+ elif alert.type == 'warning':
+ title = u"Avertissement"
+ else:
+ title = u"Erreur"
+ if action is not Undef and alert.action is None:
+ alert.action = action
+ if template is not None:
+ self.alert = alert
+ return self.r(seqof(template), self)
+ else:
+ lines = []
+ lines.append(u"""\
+
+""")
+ lines.append(load())
+ if alert.action and delay is not None:
+ url = web.websafe(alert.action.url)
+ lines.append(u"""\
+""" % (delay, url))
+ lines.append(u"""\
+%(title)s
+
+
+
+
+""")
+ return u"\n".join(lines)
+ else:
+ return alert
+ return wrapper
+ return decorator
diff --git a/lib/nulib/python/nulib/web/config_loader.py b/lib/nulib/python/nulib/web/config_loader.py
new file mode 100644
index 0000000..149c18e
--- /dev/null
+++ b/lib/nulib/python/nulib/web/config_loader.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('config',)
+
+from os import path
+from types import StringTypes
+
+import config as app_config
+BASE_CONFIG = getattr(app_config, 'BASE_CONFIG', {})
+PROD_CONFIG = getattr(app_config, 'PROD_CONFIG', {})
+PROFILE = getattr(app_config, 'PROFILE', None)
+PROFILES = getattr(app_config, 'PROFILES', dict(prod=PROD_CONFIG))
+
+_error = object()
+class Config(object):
+ # Par défaut, ne pas lancer d'exception mais retourner None pour une clé
+ # inexistante. Cependant dans ce cas il n'est plus possible d'utiliser
+ # correctement la fonction getattr(). L'alternative est d'utiliser la
+ # méthode get() de cette classe.
+ _raise_on_error = False
+ def set_raise_on_error(self, raise_on_error=True):
+ self._raise_on_error = raise_on_error
+
+ def set_appname(self, appname):
+ BASE_CONFIG['appname'] = appname
+ def set_basedir(self, basedir):
+ BASE_CONFIG['basedir'] = basedir
+ def set_profile(self, profile):
+ if profile is not None and profile not in PROFILES:
+ raise ValueError("%s: invalid profile" % profile)
+ global PROFILE
+ PROFILE = BASE_CONFIG['profile'] = profile
+
+ def __getattr__(self, name):
+ if PROFILE is not None:
+ config = PROFILES[PROFILE]
+ if name in config: return config[name]
+ if name in BASE_CONFIG: return BASE_CONFIG[name]
+ if self._raise_on_error: raise AttributeError(name)
+ else: return None
+
+ def __getitem__(self, key, default=_error):
+ if PROFILE is not None:
+ config = PROFILES[PROFILE]
+ if key in config: return config[key]
+ if key in BASE_CONFIG: return BASE_CONFIG[key]
+ if default is _error and self._raise_on_error: raise KeyError(key)
+ else: return default
+ get = __getitem__
+ def __contains__(self, key):
+ if PROFILE is not None:
+ config = PROFILES[PROFILE]
+ if key in config: return True
+ return key in BASE_CONFIG
+ has_key = __contains__
+
+config = Config()
diff --git a/lib/nulib/python/nulib/web/model.py b/lib/nulib/python/nulib/web/model.py
new file mode 100644
index 0000000..266fed5
--- /dev/null
+++ b/lib/nulib/python/nulib/web/model.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'LazyObject',
+ 'dbconfig', 'fixsqlite', 'lazydb', 'oradbinfos',
+ 'incd', 'excd', 'Row', 'RowCtl',
+ 'Migration',
+)
+
+from os import path
+import csv
+
+from ..ext import web
+
+class LazyObject(object):
+ """Un objet proxy vers un autre objet qui est construit avec retard
+ """
+ _object = None # objet final
+
+ def __init__(self):
+ self.__dict__['_object'] = None
+
+ def _resolve(self):
+ """méthode à dériver: elle doit construire et retourner l'objet final
+ """
+
+ def __object(self):
+ if self._object is None:
+ self.__dict__['_object'] = self._resolve()
+ return self._object
+
+ def __getattr__(self, name):
+ return getattr(self.__object(), name)
+ def __setattr__(self, name, value):
+ if name in self.__dict__: self.__dict__[name] = value
+ else: setattr(self.__object(), name, value)
+ def __delattr__(self, name):
+ if name in self.__dict__: del self.__dict__[name]
+ else: delattr(self.__object(), name)
+
+ def __getitem__(self, key):
+ return self.__object()[key]
+ def __setitem__(self, key, value):
+ self.__object()[key] = value
+ def __delitem__(self, key):
+ del self.__object()[key]
+
+ def __call__(self, *args, **kw):
+ return self.__object()(*args, **kw)
+
+def dbconfig(prefix='db'):
+ from .config_loader import config
+ dbtype = config.get('%stype' % prefix, None)
+ dbname = config.get('%sname' % prefix, None)
+ dbuser = config.get('%suser' % prefix, None)
+ dbpass = config.get('%spass' % prefix, None)
+ if dbtype is None: dbtype = 'sqlite'
+ if dbname is None: dbname = ':memory:'
+ kw = dict(dbn=dbtype, db=dbname)
+ if dbuser is not None: kw.update(user=dbuser, pw=dbpass)
+ return kw
+
+def fixsqlite(kw):
+    # pour les bases sqlite, un chemin relatif est exprimé par rapport à
+ # $basedir/var
+ # config.basedir n'est dispo qu'après l'initialisation de l'application.
+ # Cette fonction doit donc être appelée dans une instance de LazyObject
+    if kw['dbn'] == 'sqlite' and kw['db'] != ':memory:' and not path.isabs(kw['db']):
+ from .config_loader import config
+ kw['db'] = path.join(config.basedir, 'var', '%s.sqlite' % kw['db'])
+ return kw
+
+def lazydb(prefix='db'):
+ class LazyDb(LazyObject):
+ _resolve = lambda self: web.database(**fixsqlite(dbconfig(prefix)))
+ return LazyDb()
+
+class oradbinfos(object):
+ def __init__(self, db):
+ self.db = db
+ def __call__(self):
+ kw = self.db.keywords
+ return u"%s@%s" % (kw['user'], kw['dsn'])
+ def __unicode__(self):
+ return self()
+
+class Row(object):
+ """un proxy vers une ligne de web.database
+
+ cet objet est prévu pour être étendu avec de nouvelles propriétés calculées.
+ """
+ _row = None
+
+ def __init__(self, row):
+ self.__dict__['_row'] = row
+
+ def __contains__(self, name):
+ return name in self._row
+ def __getattr__(self, name):
+ row = self._row
+ try:
+ return getattr(row, name)
+ except AttributeError:
+ uname = name.upper()
+ if hasattr(row, uname): return getattr(row, uname)
+ lname = name.lower()
+ if hasattr(row, lname): return getattr(row, lname)
+ raise
+ def __setattr__(self, name, value):
+ if name in self.__dict__: self.__dict__[name] = value
+ else: setattr(self._row, name, value)
+ def __delattr__(self, name):
+ if name in self.__dict__: del self.__dict__[name]
+ else: delattr(self._row, name)
+
+ def __getitem__(self, key):
+ row = self._row
+ try:
+ return row[key]
+ except KeyError:
+ ukey = key.upper()
+ if ukey in row: return row[ukey]
+ lkey = key.lower()
+ if lkey in row: return row[lkey]
+ raise
+ def __setitem__(self, key, value):
+ self._row[key] = value
+ def __delitem__(self, key):
+ del self._row[key]
+
+ def __repr__(self): return repr(self._row)
+ def __str__(self): return str(self._row)
+
+def incd(fd, *names):
+ td = dict()
+ if not names:
+ names = [name for name in fd.keys() if name != 'self']
+ for name in names:
+ value = fd.get(name, None)
+ if value is not None:
+ td[name] = value
+ return td
+def excd(fd, *excludes):
+ td = dict()
+ excludes = set(excludes)
+ excludes.add('self')
+ names = [name for name in fd.keys() if name not in excludes]
+ for name in names:
+ value = fd.get(name, None)
+ if value is not None:
+ td[name] = value
+ return td
+
+class DBMixin(object):
+ def dbquery(self, *args, **kw): return self._map(self.DB.query(*args, **kw))
+ def dbwhere(self, *args, **kw): return self._map(self.DB.where(*args, **kw))
+ def dbselect(self, *args, **kw): return self._map(self.DB.select(*args, **kw))
+ query, where, select = dbquery, dbwhere, dbselect
+
+ def dbinsert(self, *args, **kw): return self.DB.insert(*args, **kw)
+ def dbmultiple_insert(self, *args, **kw): return self.DB.multiple_insert(*args, **kw)
+ def dbupdate(self, *args, **kw): return self.DB.update(*args, **kw)
+ def dbdelete(self, *args, **kw): return self.DB.delete(*args, **kw)
+ insert, multiple_insert, update, delete = dbinsert, dbmultiple_insert, dbupdate, dbdelete
+
+ def dbtrans(self, *args, **kw): return self.DB.transaction(*args, **kw)
+ transaction = dbtrans
+
+ def _fix_NULL(self, row, allow_null=True):
+ if allow_null:
+ for key, value in row.items():
+ if value == 'NULL':
+ row[key] = None
+ return row
+
+ def dbloadcsv(self, table, file, allow_null=True):
+ inf = open(file, "rb")
+ try:
+ reader = csv.DictReader(inf)
+ self.dbmultiple_insert(table, [self._fix_NULL(row, allow_null) for row in reader])
+ finally:
+ inf.close()
+
+ def tbwhere(self, *args, **kw): return self._map(self.DB.where(self.TB, *args, **kw))
+ def tbselect(self, *args, **kw): return self._map(self.DB.select(self.TB, *args, **kw))
+ def tbinsert(self, *args, **kw): return self.DB.insert(self.TB, *args, **kw)
+ def tbmultiple_insert(self, *args, **kw): return self.DB.multiple_insert(self.TB, *args, **kw)
+ def tbupdate(self, *args, **kw): return self.DB.update(self.TB, *args, **kw)
+ def tbdelete(self, *args, **kw): return self.DB.delete(self.TB, *args, **kw)
+
+ def _parse(self, kw):
+ pass
+ def find(self, **kw):
+ self._parse(kw)
+ return self.tbwhere(**kw)
+ def get(self, **kw):
+ return web.iterbetter(self.find(**kw))[0]
+ def create(self, **kw):
+ self._parse(kw)
+ return self.tbinsert(**kw)
+ def pkupdate(self, pk, **kw):
+ self._parse(kw)
+ self.tbupdate("%s = $pk" % self.PK, vars=locals(), **excd(kw, 'pk'))
+ def pkdelete(self, pk):
+ self.tbdelete("%s = $pk" % self.PK, vars=locals())
+
+class RowCtl(DBMixin):
+ DB = None
+ TB = None
+ ROW = None
+ PK = "id"
+ PARSERS = None
+
+ def _parse(self, kw):
+ parsers = self.PARSERS
+ if parsers is not None:
+ for key, parser in parsers.items():
+ if key in kw:
+ kw[key] = parser(kw[key])
+ def _map(self, items):
+ ROW = self.ROW
+ if ROW is None: return items
+ else: return (ROW(item) for item in items)
+
+class Migration(DBMixin):
+ DB = None
+ NAME = None
+
+ def _map(self, items):
+ return items
+
+ def migrate(self):
+ version = self.initial() + 1
+ while True:
+ method = getattr(self, 'version%i' % version, None)
+ if method is None: break
+ method()
+ self.dbquery("update _dbupdater set version = $version where name = $name",
+ vars = dict(version=version, name=self.NAME))
+ version += 1
+
+ def __dbinfos(self):
+ return self.dbwhere('_dbupdater', name=self.NAME).first()
+ def initial(self):
+ try: dbinfos = self.__dbinfos()
+ except:
+ self.dbquery("create table _dbupdater (name varchar(64), version integer)")
+ dbinfos = self.__dbinfos()
+ if not dbinfos:
+ self.dbinsert('_dbupdater', name=self.NAME, version=-1)
+ dbinfos = self.__dbinfos()
+ return dbinfos.version
diff --git a/lib/nulib/python/nulib/web/pages.py b/lib/nulib/python/nulib/web/pages.py
new file mode 100644
index 0000000..fd22bcf
--- /dev/null
+++ b/lib/nulib/python/nulib/web/pages.py
@@ -0,0 +1,573 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'nocache', 'auth', 'defaults',
+ 'reset_session', 'set_session', 'session', 'check_session',
+ 'Page', 'Application',
+)
+
+import os, sys, base64, logging, re, shlex
+from os import path
+
+from ..base import isstr, isbool, isnum, isflt, seqof, make_prop
+from ..uio import _u
+from ..output import set_verbosity, einfo, enote, edebug
+from ..args import get_args
+from ..dates import rfc2822
+from ..functions import apply_args
+from ..words import plural
+from ..formats import unicodeF, strF, booleanF, integerF, floatF
+from ..config import ShConfig
+from ..ext import web
+
+from . import ui, blueprint, bootstrap
+from .api import _ensure_session
+
+DEFAULT_TEMPLATE_GLOBALS = {
+ 'checked': ui.checked, 'selected': ui.selected, 'disabled': ui.disabled, 'required': ui.required,
+ 'accesskeyif': ui.accesskeyif,
+ 'classif': ui.classif, 'addclassif': ui.addclassif,
+ 'favicon': ui.favicon, 'css': ui.css, 'js': ui.js, 'jscss': ui.jscss,
+ 'bp': blueprint, 'blueprintcss': blueprint.load,
+ 'bs': bootstrap, 'bootstrapcss': bootstrap.load,
+ 'plural': plural,
+}
+
+HANDLER_CLASSES = []
+class MetaPage(type):
+ """Méta-classe qui suit la définition des classes dérivées de Page, afin
+ de construire automatiquement la liste des classes handler.
+ """
+ def __init__(cls, name, bases, attrs):
+ type.__init__(cls, name, bases, attrs)
+ abstract = cls.__dict__.get('__abstract_page__', False)
+ if not abstract:
+ _fix_PATH(cls, name)
+ HANDLER_CLASSES.append(cls)
+
+def nocache(method):
+ """Décorateur pour s'assurer que le résultat de la méthode web n'est pas mis en cache
+ """
+ def wrapper(self, *args, **kw):
+ web.header("Expires", rfc2822(0))
+ web.header("Pragma", "no-cache")
+ web.header("Cache-control", "max-age=0, no-cache, must-revalidate")
+ return method(self, *args, **kw)
+ return wrapper
+
+def auth(authenticator=None, realm='nulib'):
+ """Décorateur pour s'assurer que la méthode web est authentifiée.
+
+ La fonction authenticator avec la signature (username, password) permet
+ d'authentifier l'utilisateur connecté.
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ env = web.ctx.environ
+ if env.has_key('HTTP_AUTHORIZATION'):
+ auth = env['HTTP_AUTHORIZATION'].split()
+ if auth[0:1] and auth[0].lower() == 'basic':
+ username, password = base64.b64decode(auth[1]).split(':')
+ if authenticator is None or apply_args(authenticator, username, password):
+ return method(self, *args, **kw)
+ web.header('WWW-Authenticate', 'Basic realm="%s"' % realm, True)
+ return web.HTTPError('401 Unauthorized')
+ return wrapper
+ return decorator
+
+def defaults(*required, **defaults):
+ """Initialiser dans l'objet courant des variables à des valeurs par
+ défaut, ou en les prenant parmi les paramètres de la requête.
+
+ Si la valeur par défaut est respectivement une chaine, un booléen, un entier,
+ une valeur flottante, la valeur de la requête est automatiquement convertie
+ et la valeur effective sera toujours respectivement une chaine, un booléen,
+ un entier, une valeur flottante.
+
+ Si la valeur par défaut est None, la valeur de la requête sera convertie en
+ None si elle est vide.
+
+ Si la valeur est une fonction, elle est appelée pour formatter la valeur de
+ la requête reçue.
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ # defaults doit être purgé des valeurs "callable", elles ne sont
+ # utilisées que pour le formattage
+ webdefaults = {}
+ for key, value in defaults.items():
+ if callable(value): webdefaults[key] = None
+ else: webdefaults[key] = value
+ webinput = self.defaults = web.input(*required, **webdefaults)
+ def fix(name, format=None, formatter=None):
+ value = webinput.get(name, None)
+ if format is None and not value:
+ webinput[name] = None
+ elif format is not None:
+ webinput[name] = format.parse(value)
+ elif formatter is not None:
+ webinput[name] = formatter(value)
+ for name, value in defaults.items():
+ if isstr(value): fix(name, unicodeF)
+ elif isbool(value): fix(name, booleanF)
+ elif isnum(value): fix(name, integerF)
+ elif isflt(value): fix(name, floatF)
+ elif callable(value): fix(name, None, value)
+ elif value is None: fix(name)
+ return method(self, *args, **kw)
+ return wrapper
+ return decorator
+
+def reset_session(**initial):
+ """réinitialiser la session avec les valeurs initiales spécifiées
+
+ s'assurer que l'objet courant contient la variable session. si les sessions
+ n'ont pas été configurées au niveau de l'application, une exception est
+ levée
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ session = _ensure_session(self)
+ # session_id est requis
+ initial.update(session_id=session.session_id)
+ session._data.clear()
+ session.update(initial)
+ return method(self, *args, **kw)
+ return wrapper
+ return decorator
+
+def set_session(**values):
+ """initialiser la session avec les valeurs spécifiées
+
+ s'assurer que l'objet courant contient la variable session. si les sessions
+ n'ont pas été configurées au niveau de l'application, une exception est
+ levée
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ session = _ensure_session(self)
+ session.update(values)
+ return method(self, *args, **kw)
+ return wrapper
+ return decorator
+
+def session(**defaults):
+ """s'assurer que les variables spécifiées existent dans la session. Sinon,
+ initialiser la session avec les valeurs par défaut spécifiées.
+
+ s'assurer que l'objet courant contient la variable session. si les sessions
+ n'ont pas été configurées au niveau de l'application, une exception est
+ levée
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ session = _ensure_session(self)
+ for key, value in defaults.items():
+ if key not in session:
+ session[key] = value
+ return method(self, *args, **kw)
+ return wrapper
+ return decorator
+
+def check_session(_onerror=None, **validators):
+ """vérifier les valeurs des variables spécifiées dans la session.
+    pour chaque élément de validators, si la valeur est scalaire, vérifier que
+    la session contient cette valeur exacte. si la valeur est callable, ce doit
+ être une fonction qui prend un seul argument, la valeur qui est actuellement
+ dans la session. si la fonction retourne True, alors la valeur est vérifiée.
+
+ si la valeur n'est pas vérifiée, retourner un code 401 unauthorized, sauf
+    si _onerror est spécifié. dans ce cas, c'est l'url d'une page vers laquelle
+ il faut faire une redirection.
+
+ s'assurer que l'objet courant contient la variable session. si les sessions
+ n'ont pas été configurées au niveau de l'application, une exception est
+ levée
+ """
+ def decorator(method):
+ def wrapper(self, *args, **kw):
+ session = _ensure_session(self)
+ validated = True
+ for key, value in validators.items():
+ if key not in session:
+ validated = False
+ break
+ elif callable(value):
+ if not value(session[key]):
+ validated = False
+ break
+ else:
+ if session[key] != value:
+ validated = False
+ break
+ if not validated:
+ if _onerror is not None: return web.redirect(_onerror)
+ else: raise web.unauthorized("invalid session")
+ return method(self, *args, **kw)
+ return wrapper
+ return decorator
+
+def _fix_PATH(cls, name):
+ if cls.PATH is None:
+ if cls.PREFIX is None:
+ cls.PREFIX = '/%s' % name
+ if cls.PREFIX == '/': cls.PATH = '/(.*)'
+ else: cls.PATH = '%s(?:/(.*))?' % cls.PREFIX
+
+class Page(object):
+ __metaclass__ = MetaPage
+ __abstract_page__ = True
+
+ app = None # instance de Application
+ _render = None # instance de web.template.render
+
+ TEMPLATEDIR = None # répertoire relatif des templates pour cette classe
+
+ PREFIX = None # url relatif d'accès aux méthodes de cette classe
+ # par défaut, PREFIX = '/%s' % le_nom_de_la_classe
+ PATH = None # expression régulière qui matche les urls traités par cette classe
+ # par défaut, PATH = '%s/(.*)' % PREFIX
+
+ defaults = None
+ def __getattr__(self, name):
+ defaults = self.defaults
+ if defaults is not None:
+ if defaults.has_key(name):
+ return defaults[name]
+ raise AttributeError(name)
+
+ def init(self):
+ """Initialiser l'objet
+ """
+
+ def get_last_error(self, exc_info=None):
+ """Obtenir le message d'erreur de la dernière exception, ainsi que la
+ classe CSS associée.
+
+ msg vaut None s'il n'y a pas d'erreur. msgclass peut valoir success,
+ warning ou error.
+
+ @return: msg, msgclass
+ """
+ if exc_info is None: exc_info = sys.exc_info()[:2]
+ type, value = exc_info[:2]
+ if type is None:
+ return None, u"success"
+ elif isinstance(value, Warning):
+ return u"%s: %s" % (_u(type.__name__), _u(value)), u"warning"
+ elif isinstance(value, Exception):
+ return u"%s: %s" % (_u(type.__name__), _u(value)), u"error"
+ last_error = property(get_last_error)
+
+ def redirect(self, path, **kw):
+ return web.redirect(web.url(path, **kw))
+
+ def _utf8plaintext(self, text):
+ web.header("Content-Type", "text/plain; charset=utf-8", True)
+ return text.encode("utf-8")
+
+ def r(self, name, *args, **kw):
+ if name is None or name == 'index': name = self.__class__.__name__
+ elif type(name) is tuple: name = '_'.join(name)
+ else: name = '%s_%s' % (self.__class__.__name__, name)
+ templatedir = getattr(self, 'TEMPLATEDIR', None)
+ if templatedir is not None:
+ name = path.join(templatedir, name)
+ render = getattr(self._render, name)
+ return render(*args, **kw)
+
+ def render(self, *args, **kw):
+ return self.r(None, *args, **kw)
+
+ NAME = None # nom de la méthode à appeler, ou None s'il n'y a pas de méthode définie
+ METHOD = None # GET ou POST
+ def __response(self, name, method):
+ if name is None and method == "GET":
+ # si on essaie d'accéder à /page, rediriger vers /page/
+ return web.redirect("%s/%s" % (web.ctx.path, web.ctx.query))
+ self.NAME = name
+ self.METHOD = method
+ m = None
+ if m is None and name is not None and hasattr(self, name):
+ m = getattr(self, name)
+ if m is None and hasattr(self, self.__class__.__name__):
+ m = getattr(self, self.__class__.__name__)
+ if m is None and not name and hasattr(self, 'index'):
+ m = getattr(self, 'index')
+ if m is None:
+ m = self.error
+ # XXX Ajouter un argument en fonction de la valeur du header Accept:, pour text/plain,
+ # text/html, application/xml et application/xhtml+xml
+ return apply_args(m, method)
+
+ def GET(self, name=None): return self.__response(name, 'GET')
+ def POST(self, name=None): return self.__response(name, 'POST')
+ def error(self): raise web.notfound()
+ def index(self): return self.render()
+
+class Application(object):
+ # si ce fichier existe dans basedir, alors forcer le mode développement: la
+ # variable HOST est ignorée et DEBUG vaut par défaut True
+ DEVEL_SF = '.devel'
+ # nom du fichier de configuration à charger automatiquement à partir de
+ # basedir. pour désactiver cette fonctionnalité, utiliser la valeur None
+ CONFIG_FILE = 'config/server.conf'
+ CONFIG_VARS = dict(NAME=strF, HOST=strF, PORT=integerF, DEBUG=booleanF, PROFILE=strF)
+ # configuration par défaut
+ NAME = None # nom de l'application
+ HOST = '0.0.0.0'
+ PORT = 7080
+ DEBUG = False
+ PROFILE = None
+
+ webapp = None # instance de web.application
+ session = None # instance de web.session.Session
+
+ basedir = None # répertoire de base pour les fichiers servis
+
+ render = None # instance de web.template.render
+ templatedir = None # répertoire des templates
+ template_globals = None # dictionnaire global pour render
+
+ args = None # arguments qui restent sur la ligne de commande
+
+ def __init__(self, basedir=None, templatedir=None, host=None, port=None, debug=None, profile=None):
+ if self.basedir is None or basedir is not None:
+ if basedir is None:
+ # par défaut, basedir est le répertoire qui contient la classe
+ module_name = self.__class__.__module__
+ module = sys.modules.get(module_name, None)
+ if module is not None:
+ file = module.__file__
+ if file is not None:
+ basedir = path.abspath(path.split(file)[0])
+ if basedir is None:
+ basedir = os.getcwd()
+ self.basedir = basedir
+ if self.templatedir is None or templatedir is not None:
+ if templatedir is None:
+ templatedir = path.join(self.basedir, 'templates')
+ self.templatedir = templatedir
+
+ devel = path.exists(path.join(self.basedir, self.DEVEL_SF))
+ if devel: einfo("Mode developpement activé")
+
+ if self.CONFIG_FILE is not None:
+ config = path.join(self.basedir, self.CONFIG_FILE)
+ if path.exists(config):
+ c = ShConfig(config, self.CONFIG_VARS)
+ for name, value in c.items():
+ if devel and name == 'HOST': continue
+ if devel and name == 'DEBUG' and value is None: value = True
+ if value is not None: setattr(self, name, value)
+
+ if self.NAME is None:
+ # par défaut, le nom de l'application est le nom du répertoire de
+ # base
+ self.NAME = path.basename(self.basedir)
+
+ if host is not None: self.HOST = host
+ if port is not None: self.PORT = port
+ if debug is not None: self.DEBUG = debug
+ if profile is not None: self.PROFILE = profile
+
+ tg = self.template_globals
+ if tg is None: tg = {}
+ tg.update(DEFAULT_TEMPLATE_GLOBALS)
+ self.template_globals = tg
+
+ OPTIONS = None # options
+ LONG_OPTIONS = None
+ def process_option(self, option, value):
+ """Traiter une option et retourner True, ou False si l'option n'est
+ pas reconnue.
+
+ Cette méthode est prévue pour être dérivée.
+ """
+ return False
+
+ # process_option() s'appelait auparavant is_option(). Cette méthode est
+ # gardée pour compatibilité
+ def is_option(self, option, value): return False
+
+ def process_args(self, args):
+ """Traiter les arguments après les options.
+
+ Cette méthode est appelée avant que soit initialisé le serveur web, et
+ est prévue pour être dérivée.
+ """
+
+ def before_start(self):
+ """Effectuer un traitement avant de lancer le serveur web standalone.
+
+ Cette méthode est appelée juste avant que soit lancé le serveur web, et
+ est prévue pour être dérivée.
+ """
+ if web.config.debug:
+ einfo("Bienvenue sur %s" % self.NAME)
+ profile = self.PROFILE
+ if profile is None: edebug("Lancement sans profil spécifié")
+ else: enote("Lancement dans le profil %s" % profile)
+ edebug("basedir: %s" % self.basedir)
+ edebug("templatedir: %s" % self.templatedir)
+ edebug("Configuration des routes: %s" % str(self.webapp.mapping))
+ enote("Lancement du serveur sur http://%s:%i" % (self.HOST, self.PORT))
+
+ def new_render(self, templatedir=None):
+ """Retourner une nouvelle instance de web.template.render
+ """
+ if templatedir is None: templatedir = self.templatedir
+ return web.template.render(templatedir,
+ cache=not self.DEBUG,
+ globals=self.template_globals)
+
+ def _new_application(self):
+ """Créer une nouvelle instance de web.application
+ """
+ return web.application()
+
+ def _new_session(self):
+ """Si les sessions ne sont pas supportées, retourner None.
+
+ Sinon, retourner un tuple (store, initializer) où:
+ - store est une instance de web.session.Store, qui sert à stocker les
+ sessions.
+ - initializer est le dictionnaire initial, et peut être omis
+
+ Exemple:
+ return web.session.DiskStore('sessions'), {}
+ """
+ return None
+
+ def _configure_handler_class(self, handler_class):
+ self.webapp.add_mapping(handler_class.PATH, handler_class)
+
+ def _configure_handler(self, handler):
+ handler.app = self
+ handler._render = self.render
+ handler.init()
+
+ def _wsgifunc(self):
+ """Obtenir la fonction pour servir les requêtes wsgi
+ """
+ return self.webapp.wsgifunc()
+
+ def _start_server(self, server_type=None, server_socket=None):
+ """Démarrer le serveur web
+ """
+ func = self._wsgifunc()
+
+ if os.environ.has_key('SERVER_SOFTWARE'): # cgi
+ os.environ['FCGI_FORCE_CGI'] = 'Y'
+
+ if (os.environ.has_key('PHP_FCGI_CHILDREN') #lighttpd fastcgi
+ or os.environ.has_key('SERVER_SOFTWARE')):
+ return web.wsgi.runfcgi(func, None)
+
+ if server_type in ('fcgi', 'fastcgi'):
+ return web.wsgi.runfcgi(func, server_socket or (self.HOST, self.PORT))
+ elif server_type in ('scgi', ):
+ return web.wsgi.runscgi(func, server_socket or (self.HOST, self.PORT))
+ else:
+ return web.httpserver.runsimple(func, (self.HOST, self.PORT))
+
+ def __configure(self):
+ web.config.debug = self.DEBUG
+ web.config.BASEDIR = self.basedir
+
+ self.webapp = self._new_application()
+ self.webapp.set_handler_configurator(self._configure_handler)
+ for handler_class in HANDLER_CLASSES:
+ self._configure_handler_class(handler_class)
+
+ session = None
+ tmp = seqof(self._new_session())
+ store = tmp[0:1] and tmp[0] or None
+ initializer = tmp[1:2] and tmp[1] or None
+ if store is not None:
+            # rendre compatible l'utilisation des sessions avec le mode debug
+ session = web.config.get("_session", None)
+ if session is None:
+ session = web.session.Session(self.webapp, store, initializer=initializer)
+ self.session = web.config._session = session
+
+ tg = self.template_globals
+ # session et application comme variables globales du template
+ tg.setdefault("S", session)
+ tg.setdefault("A", self)
+
+ self.render = web.template.render(self.templatedir, cache=not self.DEBUG, globals=tg)
+
+ def wsgiapp(self):
+ """Retourner l'instance à utiliser pour mod_wsgi.
+
+ mod_wsgi s'attend à trouver une variable nommée application dans le
+ module spécifié dans la configuration. Il faut donc utiliser cette
+ méthode de cette façon:
+
+ class MyApp(Application):
+ ...
+
+ application = MyApp().wsgiapp()
+ """
+ self.__configure()
+ return self._wsgifunc()
+
+ def run(self, args=None, **ignored):
+ """Démarrer le serveur, en standalone, fastcgi ou scgi suivant les
+ arguments de la ligne de commande.
+
+ Utiliser cette méthode de cette façon:
+
+ class MyApp(Application):
+ ...
+
+ if __name__ == '__main__':
+ import sys
+ MyApp().run(sys.argv[1:])
+ """
+ if args is None: args = []
+ options, args = get_args(map(_u, args),
+ 'S:s:H:P:Dp:' + (self.OPTIONS or ''),
+ ['server-type=', 'server-socket=', 'host=', 'port=', 'debug', 'profile='] + list(self.LONG_OPTIONS or ()))
+ server_type = None
+ server_socket = None
+ for option, value in options:
+ if self.process_option(option, value): pass
+ elif self.is_option(option, value): pass #compatibilité
+ elif option in ('-S', '--server-type'): server_type = value
+ elif option in ('-s', '--server-socket'): server_socket = value
+ elif option in ('-H', '--host'): self.HOST = value
+ elif option in ('-P', '--port'): self.PORT = int(value)
+ elif option in ('-D', '--debug'):
+ self.DEBUG = True
+ set_verbosity('--debug')
+ elif option in ('-p', '--profile'): self.PROFILE = value
+ self.args = args
+ self.process_args(args)
+ self.__configure()
+ self.before_start()
+ self._start_server(server_type, server_socket)
+
+ def cgirun(self):
+ """Retourner un handler CGI. A utiliser avec Google AppEngine
+
+ Utiliser cette méthode de cette façon:
+
+ class MyApp(Application):
+ ...
+
+ main = MyApp().cgirun()
+
+ if __name__ == '__main__':
+ main()
+ """
+ self.__configure()
+ wsgifunc = self._wsgifunc()
+ try:
+ from google.appengine.ext.webapp.util import run_wsgi_app
+ return run_wsgi_app(wsgifunc)
+ except ImportError:
+ # we're not running from within Google App Engine
+ import wsgiref
+ return wsgiref.handlers.CGIHandler().run(wsgifunc)
diff --git a/lib/nulib/python/nulib/web/ui.py b/lib/nulib/python/nulib/web/ui.py
new file mode 100644
index 0000000..9f86282
--- /dev/null
+++ b/lib/nulib/python/nulib/web/ui.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'checked', 'selected', 'disabled', 'required',
+ 'accesskeyif',
+ 'classif', 'addclassif',
+ 'favicon', 'css', 'js', 'jscss',
+ 'u', 'p',
+ 'Action', 'Menu',
+)
+
+from types import StringType, UnicodeType
+import urlparse, urllib
+
+from nulib.base import odict, isseq, seqof
+from nulib.web import web
+
+def checked(b):
+ return u' checked="checked"' if b else u''
+def selected(b):
+ return u' selected="selected"' if b else u''
+def disabled(b):
+ return u' disabled="disabled"' if b else u''
+def required(b):
+ return u' required="required"' if b else u''
+
+def accesskeyif(ak, b=None):
+ if b is None: b = ak
+ return u' accesskey="%s"' % ak if b else u''
+
+def classif(c, b=None):
+ if b is None: b = c
+ return u' class="%s"' % c if b else u''
+def addclassif(c, b=None, prefix=None):
+ if b is None: b = c
+ if prefix is None: prefix = u''
+ if b: return u'%s %s' % (prefix, c)
+ else: return prefix
+
+def favicon(href):
+    return ur'<link rel="shortcut icon" href="%s" />' % href
+def css(href, media=None):
+ if media is not None:
+ media = ur' media="%s"' % media
+    return ur'<link rel="stylesheet" type="text/css" href="%s"%s />' % (href, media or u'')
+def js(href):
+    return ur'<script type="text/javascript" src="%s"></script>' % href
+def jscss(href, min=False):
+ if href.endswith(".min.js"):
+ jshref = href
+ csshref = "%s.css" % href[:-len(".min.js")]
+ elif href.endswith(".js"):
+ jshref = href
+ csshref = "%s.css" % href[:-len(".js")]
+ elif href.endswith(".css"):
+ jshref = "%s.js" % href[:-len(".css")]
+ csshref = href
+ return u"%s\n%s" % (js(jshref), css(csshref))
+
+def u(text):
+ """Si text n'est pas de l'unicode, le convertir en unicode avec l'encoding utf-8
+ """
+ t = type(text)
+ if t is not UnicodeType:
+ if t is not StringType: text = str(text)
+ text = unicode(text, "utf-8")
+ return text
+
+def p(text):
+    """Si text commence par un tag, le laisser tel quel, sinon le mettre entre <p> et </p>
+    """
+    text = u(text)
+    if text[0:1] == '<': return text
+    else: return u'<p>%s</p>' % text
+
+################################################################################
+# Menus
+
+class Action(odict):
+ """Un lien ou une action d'un formulaire
+
+ url: cible de l'action ou du lien (avec querystring)
+ title: titre du lien ou de l'élément de menu
+ id: identifiant dans un menu
+ p (in_profiles): profils dans lequel l'action est valide. le premier profil
+ est le profil par défaut
+ c (css): classe css à appliquer au lien
+ ak (accesskey): accesskey à installer sur le lien
+ q (query): paramètres de la requête, sous forme de dictionnaire ou de chaine
+ method: type de requête, get ou post
+ PROFILE_url, PROFILE_query: valeurs spécifiques à certains profils, PROFILE
+ étant une valeur de in_profiles
+ to_PROFILE_url, to_PROFILE_query: valeurs spécifiques pour basculer vers le
+ profil spécifié, PROFILE étant une valeur de in_profiles
+
+ les valeurs *url et *query sont reconstruites: baseurl et basequery sont
+ extrait d'url et query est reconstruit en le fusionnant avec basequery
+
+ Les valeurs suivantes sont générées:
+ baseurl: cible de l'action ou du lien (sans querystring)
+ """
+
+ def __merge_query(self, query, basequery):
+ if query is None: query = {}
+ elif not isinstance(query, dict):
+ query = urlparse.parse_qs(query, True)
+ return dict(basequery, **query)
+ def __update_url(self, urlkey, querykey, baseurlkey):
+ url = self.get(urlkey, None)
+ if url is None: return
+ urlsplit = urlparse.urlsplit(url)
+ baseurl = urlparse.urlunsplit(urlsplit[:3] + ('',) + urlsplit[4:])
+ basequery = urlparse.parse_qs(urlsplit[3], True)
+ query = self.get(querykey, None)
+ query = self.__merge_query(query, basequery)
+ url = urlparse.urlunsplit(
+ urlsplit[:3]
+ + (urllib.urlencode(query, True),)
+ + urlsplit[4:])
+ self[baseurlkey] = baseurl
+ self[querykey] = query
+ self[urlkey] = url
+
+ def __init__(self, url, title=None, id=None, p=None, c=None, ak=None, q=None, method=None, **kw):
+ super(Action, self).__init__(**kw)
+ if isseq(url):
+ if url[1:2]: title = url[1]
+ if url[2:3]: id = url[2]
+ if url[3:4]: p = url[3]
+ if url[4:5]: c = url[4]
+ if url[5:6]: ak = url[5]
+ if url[6:7]: method = url[6]
+ url = url[0] if url[0:1] else None
+ if url is None: raise ValueError("url is required")
+ if title is None: title = url
+ if p is None: p = kw.get('in_profiles', None)
+ if c is None: c = kw.get('css', None)
+ if ak is None: ak = kw.get('accesskey', None)
+ if q is None: q = kw.get('query', None)
+ if method is None: method = 'get'
+
+ self.title = title
+ self.id = id
+ self.css = c
+ self.accesskey = ak
+ self.method = method
+
+ self.url = url
+ self.query = q
+ self.__update_url('url', 'query', 'baseurl')
+
+ self.in_profiles = seqof(p, None)
+ if self.in_profiles is None: return
+ for p in self.in_profiles:
+ self.__update_url(*['%s_%s' % (p, key) for key in ('url', 'query', 'baseurl')])
+ self.__update_url(*['to_%s_%s' % (p, key) for key in ('url', 'query', 'baseurl')])
+
+    def __get_key(self, key, p=None, to=False):
+        # Look up key, preferring the profile-specific variant
+        # '[to_]<p>_<key>' when profile p is given; fall back to the
+        # plain key otherwise.
+        if p is not None:
+            pkey = '%s%s_%s' % ('to_' if to else '', p, key)
+            value = self.get(pkey, None)
+            if value is not None: return value
+        return self[key]
+    def get_baseurl(self, p=None, to=False):
+        """Return the base url (the url stripped of its query-string).
+        """
+        return self.__get_key('baseurl', p, to)
+    def get_query(self, p=None, to=False):
+        """Return the request parameters as a dictionary.
+        """
+        return self.__get_key('query', p, to)
+    def get_url(self, p=None, to=False):
+        """Return the url.
+        """
+        return self.__get_key('url', p, to)
+
+    def get_qs(self, p=None, to=False, sep=None):
+        """Return the request parameters as a query-string, prefixed by sep.
+        sep defaults to '?' but may be set to '&'.
+        """
+        query = self.get_query(p, to)
+        if sep is None: sep = '?'
+        return '%s%s' % (sep, urllib.urlencode(query, True))
+    qs = property(get_qs)
+    get_querystring = get_qs; querystring = property(get_qs)
+
+    def get_inputs(self, p=None, to=False):
+        """Return the request parameters rendered as hidden form fields,
+        one per value, joined with newlines.
+        """
+        query = self.get_query(p, to)
+        Hidden = web.form.Hidden
+        inputs = []
+        for name, value in query.items():
+            if isinstance(value, list):
+                # parse_qs yields lists: emit one hidden field per value
+                for value in value:
+                    inputs.append(Hidden(name=name, value=value).render())
+            else:
+                inputs.append(Hidden(name=name, value=value).render())
+        return u"\n".join(inputs)
+    inputs = property(get_inputs)
+
+class Menu(odict):
+    """A menu: an ordered collection of Action items and sub-Menus."""
+
+    nextid = None   # counter used to auto-assign item ids
+    idmap = None    # maps id (str) -> direct menu item
+
+    def __nextid(self):
+        # return the current counter value and advance it
+        nextid = self.nextid
+        self.nextid += 1
+        return nextid
+
+    def __init__(self, title, mitems=None, profiles=None,
+                 id=None, p=None, c=None, **kw):
+        super(Menu, self).__init__(**kw)
+        # write through __dict__ to bypass odict item storage: these are
+        # true instance attributes, not menu entries
+        self.__dict__['nextid'] = 0
+        self.__dict__['idmap'] = {}
+        if title is None: title = u""
+        if id is None: id = self.__nextid()
+        self.title = title
+        self.mitems = []
+        profiles = seqof(profiles, None)
+        self.profiles = profiles
+        self.default_profile = profiles[0] if profiles is not None else None
+        self.id = str(id)
+        self.in_profiles = seqof(p, None)
+        self.css = c
+        self.sel_id = None
+        self.sel_profile = None
+        self.active = False
+        for mitem in seqof(mitems):
+            self.add(mitem)
+
+    def add(self, mitem, *args, **kw):
+        """Append a menu item. Values that are neither Menu nor Action are
+        wrapped in an Action built with the extra arguments.
+        """
+        if isinstance(mitem, Menu): pass # sub-menu
+        elif not isinstance(mitem, Action):
+            mitem = Action(mitem, *args, **kw)
+        if mitem.id is None:
+            mitem.id = str(self.__nextid())
+        #mitem.profiles = self.profiles
+        mitem.default_profile = self.default_profile
+        mitem.active = False
+        self.mitems.append(mitem)
+        self.idmap[mitem.id] = mitem
+
+    def reset_selection(self):
+        """Deselect everything, recursively, and restore the default profile."""
+        self.sel_id = None
+        self.sel_profile = self.default_profile
+        self.active = False
+        for mitem in self.mitems:
+            if isinstance(mitem, Menu):
+                mitem.reset_selection()
+            else:
+                mitem.active = False
+
+    def select(self, id, p=None):
+        """Select the item with the given id, searching sub-menus
+        recursively. Return True if the item was found.
+        """
+        # first, deselect everything
+        self.reset_selection()
+        # then look for the mitem to select
+        id = str(id)
+        mitem = self.idmap.get(id, None)
+        if mitem is not None:
+            self.sel_id = id
+            if p is None and mitem.in_profiles:
+                p = mitem.in_profiles[0]
+            if p is None: p = self.default_profile
+            self.sel_profile = p
+            mitem.active = self.active = True
+            return True
+        else:
+            for mitem in self.mitems:
+                if not isinstance(mitem, Menu): continue
+                # NOTE(review): p is not forwarded to the sub-menu, so the
+                # sub-menu picks its own profile -- confirm intended
+                if mitem.select(id):
+                    self.sel_id = id
+                    self.sel_profile = mitem.sel_profile
+                    self.active = True
+                    return True
+        return False
+
+    def get_mitem(self, id=None):
+        """Return the menu item matching id (the current selection when id
+        is None), searching sub-menus recursively; None if nothing matches.
+        """
+        if id is None: id = self.sel_id
+        if id is None: return None
+        id = str(id)
+        mitem = self.idmap.get(id, None)
+        if mitem is not None: return mitem
+        for mitem in self.mitems:
+            if not isinstance(mitem, Menu): continue
+            mitem = mitem.get_mitem(id)
+            if mitem is not None: return mitem
+        return None
+    selection = property(get_mitem)
diff --git a/lib/nulib/python/nulib/words.py b/lib/nulib/python/nulib/words.py
new file mode 100644
index 0000000..93b0b17
--- /dev/null
+++ b/lib/nulib/python/nulib/words.py
@@ -0,0 +1,237 @@
+# -*- coding: utf-8 -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+"""Des fonctions pour gérer les phrases et les mots.
+"""
+
+__all__ = ('enplural', 'plural',
+ 'splitcc', 'joincc', 'splitus', 'joinus',
+ 's2cc', 'cc2s',
+ 's2us', 'us2s',
+ 'cc2us', 'us2cc',
+ )
+
+import string, re
+
+from .base import isseq
+
+RE_LAST_LETTER = re.compile(r'([a-zA-Z])[^a-zA-Z]*$')
+RE_UPPERCASE = re.compile('[A-Z]')
+def __caseof(s, ref):
+    """Return s upper- or lower-cased according to the case of the last
+    letter of the string ref; lower-cased when ref contains no letter.
+    """
+    mo = RE_LAST_LETTER.search(ref)
+    if mo is None: return s.lower()
+    if RE_UPPERCASE.match(mo.group(1)) is not None:
+        # uppercase
+        return s.upper()
+    else:
+        return s.lower()
+
+RE_CHILD = re.compile(r'(?i)child$')
+RE_Y = re.compile(r'(?i)y$')
+RE_X = re.compile(r'(?i)x$')
+RE_S = re.compile(r'(?i)s$')
+def enplural(word):
+    """Return the plural form of the English word `word`.
+
+    special cases:
+      child --> children
+      *y    --> *ies
+      *x    --> *xes
+      *s    --> *ses
+
+    otherwise a plain 's' is appended. The suffix case follows the case
+    of the word's last letter (see __caseof).
+
+    XXX the real rule is more complicated than this; implement it
+    correctly. http://en.wikipedia.org/wiki/English_plural
+    """
+    if RE_CHILD.match(word): return word + __caseof('ren', word)
+    elif RE_Y.search(word): return word[:-1] + __caseof('ies', word)
+    elif RE_X.search(word): return word + __caseof('es', word)
+    elif RE_S.search(word): return word + __caseof('es', word)
+    else: return word + __caseof('s', word)
+
+RE_PLURAL = re.compile(r'%(?:<([^#<>]*))?(?:>([^#<>]*))?#')
+
+def plural(text, count, format=True):
+    """Replace plurality markers in text with the appropriate string
+    depending on the value of count.
+
+    A plurality marker has the form %<sing>plur# where both the '<sing'
+    and the '>plur' parts are optional (see RE_PLURAL). Defaults are
+    sing == '' and plur == 's', so the minimal marker '%#' expands to
+    's' in the plural and '' in the singular.
+
+    Markers are replaced by plur when count > 1, by sing otherwise
+    (note that count == 0 therefore yields the singular form).
+
+    If format is True and text contains an occurrence of %i, it is
+    formatted with `text % count`.
+    """
+    pos = 0
+    while True:
+        mo = RE_PLURAL.search(text, pos)
+        if mo is None: break
+        before = text[:mo.start(0)]
+        after = text[mo.end(0):]
+        if count > 1:
+            plural = mo.group(2)
+            if plural is None: plural = "s"
+        else:
+            plural = mo.group(1)
+            if plural is None: plural = ""
+        # resume the scan right after the replacement text
+        pos = len(before) + len(plural)
+        text = before + plural + after
+    if format and text.find("%i") != -1:
+        text = text % count
+    return text
+
+def __split_maybe(s, sep=None):
+    # Split s on sep unless it is already a sequence.
+    if not isseq(s): s = s.split(sep)
+    return s
+
+ALL_UPPERCASE_PATTERN = re.compile(r'[A-Z0-9]+$')
+UNDERSCORE_PATTERN = re.compile(r'_+')
+UPPERCASE_PATTERN = re.compile(r'([A-Z]+)?([^A-Z]*)')
+
+def splitcc(src, plural=False):
+    """Split words written in CamelCase. The case of the words is not
+    modified.
+
+    If plural is True, enplural is used to pluralize the last word.
+
+    e.g.
+    splitcc('camelCase') --> ['camel', 'Case']
+    splitcc('URClass') --> ['UR', 'Class']
+    """
+    src = src.strip()
+    if not src: return []
+
+    parts = UPPERCASE_PATTERN.findall(src)
+    # findall() on this pattern ends with an empty match; drop it
+    if parts[ - 1] == ('', ''): parts = parts[: - 1]
+
+    dests = []
+    for prefix, suffix in parts:
+        if prefix == "":
+            dests.append(suffix)
+        elif len(prefix) == 1:
+            dests.append(prefix + suffix)
+        else:
+            # len(prefix) > 1: an uppercase run such as 'URC' splits into
+            # the acronym 'UR' plus the start of the next word 'C...'
+            dests.append(prefix[: - 1])
+            dests.append(prefix[ - 1] + suffix)
+
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return dests
+
+def joincc(src, firstcap=False, plural=False):
+    """Join words in camelCase. Words that are entirely uppercase are kept
+    unchanged; other words are lower-cased, then capitalized as needed.
+
+    If plural is True, enplural is used to pluralize the last word.
+
+    e.g.
+    joincc('hello world') --> 'helloWorld'
+    joincc(['hello', 'world']) --> 'helloWorld'
+    joincc('Hello world') --> 'helloWorld'
+    joincc('Hello World') --> 'helloWorld'
+    joincc('HELLO WORLD') --> 'HELLOWORLD'
+    joincc('Hello world', True) --> 'HelloWorld'
+    joincc('Hello World', True) --> 'HelloWorld'
+    joincc('HELLO WORLD', True) --> 'HELLOWORLD'
+    """
+    dests = []
+    first = True
+    for s in __split_maybe(src):
+        if ALL_UPPERCASE_PATTERN.match(s) is None:
+            if first and not firstcap:
+                s = s.lower()
+            else:
+                s = s.capitalize()
+        dests.append(s)
+        first = False
+
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return ''.join(dests)
+
+def splitus(src, plural=False):
+    """Split words separated by '_'. The case of the words is not
+    modified.
+
+    If plural is True, enplural is used to pluralize the last word.
+
+    e.g.
+    splitus('under_score') --> ['under', 'score']
+    splitus('UR_CLASS') --> ['UR', 'CLASS']
+    """
+    src = src.strip()
+    if not src: return []
+
+    dests = UNDERSCORE_PATTERN.split(src)
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return dests
+
+def joinus(src, plural=False):
+    """Join words with '_' separators. Words that are entirely uppercase
+    are kept unchanged; other words are lower-cased before joining.
+
+    If plural is True, enplural is used to pluralize the last word.
+
+    e.g.
+    joinus('hello world') --> 'hello_world'
+    joinus(['hello', 'world']) --> 'hello_world'
+    joinus('Hello world') --> 'hello_world'
+    joinus('Hello World') --> 'hello_world'
+    joinus('HELLO WORLD') --> 'HELLO_WORLD'
+    """
+    dests = []
+    for s in __split_maybe(src):
+        if ALL_UPPERCASE_PATTERN.match(s) is None:
+            s = s.lower()
+        dests.append(s)
+
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return '_'.join(dests)
+
+
+def s2cc(s, firstcap=False, sep=None, plural=False):
+    """Turn words separated by sep into a camelCase run of words.
+    e.g. s2cc('hello world') --> 'helloWorld'
+    """
+    dests = __split_maybe(s, sep)
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return joincc(dests, firstcap)
+
+def cc2s(cc, sep=None, plural=False):
+    """Turn a camelCase run of words into words separated by sep.
+    e.g. cc2s('helloWorld') --> 'hello world'
+    """
+    return (sep or ' ').join(map(string.lower, splitcc(cc, plural)))
+
+def s2us(s, sep=None, plural=False):
+    """Turn words separated by sep into words separated by '_'.
+    e.g. s2us('hello world') --> 'hello_world'
+    """
+    dests = __split_maybe(s, sep)
+    if dests and plural: dests[-1] = enplural(dests[-1])
+    return joinus(dests)
+
+def us2s(us, sep=None, plural=False):
+    """Turn words separated by '_' into words separated by sep.
+    e.g. us2s('hello_world') --> 'hello world'
+    """
+    return (sep or ' ').join(map(string.lower, splitus(us, plural)))
+
+def cc2us(cc, plural=False):
+    """Turn a camelCase run of words into words separated by '_'.
+    e.g. cc2us('helloWorld') --> 'hello_world'
+    """
+    return joinus(splitcc(cc, plural))
+
+def us2cc(us, firstcap=False, plural=False):
+    """Turn words separated by '_' into a camelCase run of words.
+    e.g. us2cc('hello_world') --> 'helloWorld'
+    """
+    return joincc(splitus(us, plural), firstcap)
diff --git a/lib/nulib/setup.py b/lib/nulib/setup.py
new file mode 100755
index 0000000..908b278
--- /dev/null
+++ b/lib/nulib/setup.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+NAME = 'nulib'
+VERSION = None
+DESCRIPTION = 'Librairies python de nulib'
+AUTHOR = 'Jephte CLAIN'
+EMAIL = 'Jephte.Clain@univ-reunion.fr'
+MODULES = ()
+SRCDIR = 'python'
+PACKAGE_DIR = {'': SRCDIR}
+PACKAGE_DATA = {}
+PACKAGES = []
+SCRIPTS = []
+
+import os, sys, re, fnmatch, tempfile, atexit, shutil
+from os import path
+NULIBDIR = path.dirname(path.abspath(__file__))
+
+################################################################################
+
+RE_VERSION = re.compile(r'(\d+(?:\.\d+)*)(?:-r(\d+/\d+/\d+))?')
+def get_version(basedir=None):
+    """Read the version number from VERSION.txt in basedir (defaulting to
+    the directory of the running script).  Return '' when the file is
+    missing, unreadable or malformed.
+    """
+    if basedir is None:
+        basedir = path.split(path.abspath(sys.argv[0]))[0]
+    version_txt = path.join(basedir, 'VERSION.txt')
+    if not path.isfile(version_txt): return ''
+    try:
+        inf = open(version_txt, 'rb')
+        try: line = inf.readline()
+        finally: inf.close()
+    except:
+        # NOTE(review): bare except also swallows KeyboardInterrupt --
+        # consider narrowing to IOError
+        return ''
+    mo = RE_VERSION.match(line)
+    if not mo: return ''
+    return mo.group(1)
+
+def findf(spec, bp):
+ """Transformer le package bp en chemin, puis chercher récursivement les
+ fichiers correspondant à la spécification spec à partir de SRCDIR/bp
+ """
+ files = []
+ bp = bp.replace(".", "/")
+ bpdir = path.join(SRCDIR, bp)
+ bpnames = os.listdir(bpdir)
+ for specname in fnmatch.filter(bpnames, spec):
+ specfile = path.join(bpdir, specname)
+ if path.isfile(specfile):
+ files.append(specname)
+ else:
+ for dirpath, dirnames, filenames in os.walk(specfile):
+ dirnames.remove('.svn')
+ dirpath = dirpath[len(bpdir)+1:]
+ files.extend([path.join(dirpath, filename) for filename in filenames])
+ return files
+
+def fixp(p, bp):
+    """Map the package bp to a path, express the relative path p from the
+    package directory, and prepend SRCDIR/.
+    """
+    bp = bp.replace(".", "/")
+    return path.join(SRCDIR, bp, p)
+def addp(name, data=()):
+    """Register a package, along with its data files (data is a list of
+    glob specifications resolved through findf).
+    """
+    global PACKAGES, PACKAGE_DATA
+    PACKAGES.append(name)
+    if data:
+        files = []
+        for spec in data:
+            files.extend(findf(spec, name))
+        PACKAGE_DATA[name] = files
+def adds(name, scripts=()):
+    """Register scripts contained in a package.
+    """
+    global SCRIPTS
+    if scripts:
+        SCRIPTS.extend(map(lambda s: fixp(s, name), scripts))
+
+if VERSION is None: VERSION = get_version()
+#addp(module)
+
+if __name__ == '__main__':
+    from distutils.core import setup
+    setup(name=NAME, version=VERSION,
+          description=DESCRIPTION, author=AUTHOR, author_email=EMAIL,
+          py_modules=MODULES,
+          package_dir=PACKAGE_DIR, package_data=PACKAGE_DATA, packages=PACKAGES,
+          scripts=SCRIPTS,
+          )
+    # clean up the build/ directory on success (setup() raises on failure,
+    # so the atexit handler is only registered after a successful run)
+    builddir = path.join(NULIBDIR, "build")
+    if path.isdir(builddir):
+        atexit.register(shutil.rmtree, builddir)
diff --git a/lib/nulib/ssh-wrapper/rsync b/lib/nulib/ssh-wrapper/rsync
new file mode 100755
index 0000000..81bbdb6
--- /dev/null
+++ b/lib/nulib/ssh-wrapper/rsync
@@ -0,0 +1,22 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# wrapper allowing GIT_SSH-style forcing with tools such as git-annex.
+# NULIB_GIT_FORCE_PATH must be set: it is the PATH value to use.
+# NULIB_GIT_FORCE_SSH may be set: the remote-shell command substituted
+# into rsync's -e argument.
+args=()
+if [ -n "$NULIB_GIT_FORCE_SSH" ]; then
+    # rewrite the value of the -e argument if present: keep its options
+    # (everything after the first word) but replace the command itself.
+    # NOTE(review): when $2 contains no space, ${2#* } leaves $2 unchanged
+    # and the command appears twice -- confirm intended
+    while [ $# -gt 0 ]; do
+        if [ "$1" == -e ]; then
+            args=("${args[@]}" -e "$NULIB_GIT_FORCE_SSH ${2#* }")
+            shift; shift
+            break
+        fi
+        args=("${args[@]}" "$1")
+        shift
+    done
+fi
+args=("${args[@]}" "$@")
+[ -n "$NULIB_GIT_FORCE_PATH" ] && export PATH="$NULIB_GIT_FORCE_PATH"
+export NULIB_SSH_RSYNC_SUPPORT=1
+exec rsync "${args[@]}"
diff --git a/lib/nulib/ssh-wrapper/ssh b/lib/nulib/ssh-wrapper/ssh
new file mode 100755
index 0000000..72989d3
--- /dev/null
+++ b/lib/nulib/ssh-wrapper/ssh
@@ -0,0 +1,9 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# wrapper allowing GIT_SSH to be used with tools such as git-annex.
+# NULIB_GIT_FORCE_PATH must be set: it is the PATH value to use.
+# NULIB_GIT_FORCE_SSH may be set: the executable used to run ssh (it is
+# also re-exported as GIT_SSH for child processes).
+[ -n "$NULIB_GIT_FORCE_PATH" ] && export PATH="$NULIB_GIT_FORCE_PATH"
+[ -n "$NULIB_GIT_FORCE_SSH" ] && export GIT_SSH="$NULIB_GIT_FORCE_SSH"
+exec "${NULIB_GIT_FORCE_SSH:-ssh}" "$@"
diff --git a/lib/nulib/templates/webpyapp/.devel b/lib/nulib/templates/webpyapp/.devel
new file mode 100644
index 0000000..e69de29
diff --git a/lib/nulib/templates/webpyapp/.gitignore b/lib/nulib/templates/webpyapp/.gitignore
new file mode 100644
index 0000000..539da74
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/.gitignore
@@ -0,0 +1 @@
+*.py[co]
diff --git a/lib/nulib/templates/webpyapp/appshell b/lib/nulib/templates/webpyapp/appshell
new file mode 100755
index 0000000..696633c
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/appshell
@@ -0,0 +1,47 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# Launch an interactive Python shell preloaded with the application context.
+scriptdir="$(dirname -- "$0")"; scriptdir="$(cd "$scriptdir"; pwd)"
+cd "$scriptdir"
+appname="$(basename -- "$(pwd)")"
+
+if [ -n "$NULIBDIR" -a "$NULIBDIR" == "$NULIBINIT" ]; then
+    : # nulib is already loaded
+elif [ -f "$scriptdir/nulib/load.sh" ]; then
+    # development mode
+    source "$scriptdir/nulib/load.sh"
+elif [ -f /etc/nulib.sh ]; then
+    # production mode
+    source /etc/nulib.sh
+else
+    echo "ERROR: impossible de trouver nulib" 1>&2
+    exit 1
+fi
+
+# Environment
+export BASEDIR="$scriptdir"
+[ -n "$PYTHONPATH" ] && PYTHONPATH=":$PYTHONPATH"
+export PYTHONPATH="$BASEDIR/config:$BASEDIR/python$PYTHONPATH"
+
+# Parameters
+DEFAULT_PYTHON=python2.7
+
+SLPATH=
+VIRTUAL_ENV=
+PYTHON=
+source "$BASEDIR/config/server.conf"
+if [ -n "$VIRTUAL_ENV" ]; then
+    PYTHON="$VIRTUAL_ENV/bin/python"
+elif [ -z "$PYTHON" ]; then
+    PYTHON="$DEFAULT_PYTHON"
+fi
+
+# Directories holding additional Python libraries
+if [ -n "$SLPATH" ]; then
+    export PYTHONPATH="$PYTHONPATH:$SLPATH"
+    [ -n "$LD_LIBRARY_PATH" ] && LD_LIBRARY_PATH="$LD_LIBRARY_PATH:"
+    # BUGFIX: the guard above already appended the ':' separator when
+    # LD_LIBRARY_PATH was non-empty; adding another ':' here produced an
+    # empty path entry ('::' or a leading ':'), which is searched as '.'
+    export LD_LIBRARY_PATH="$LD_LIBRARY_PATH$SLPATH"
+fi
+
+#
+echo ">>> Shell Python pour $appname"
+exec "$PYTHON" -i -c "$(<"$BASEDIR/config/appshell.py")"
diff --git a/lib/nulib/templates/webpyapp/config/appshell.py b/lib/nulib/templates/webpyapp/config/appshell.py
new file mode 100644
index 0000000..3e5df07
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/config/appshell.py
@@ -0,0 +1,9 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# initialisation pour appshell
+
+import sys, os
+from os import path
+import pdb
+
+from nulib.web import web
+from app import config
diff --git a/lib/nulib/templates/webpyapp/config/config.py b/lib/nulib/templates/webpyapp/config/config.py
new file mode 100644
index 0000000..8150f17
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/config/config.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# Configuration de l'application
+
+# Note: remplacer le cas échéant DBNAME par le nom de la base de données
+
+# configuration de base
+BASE_CONFIG = dict(
+ #migrate=True, # auto-migration au démarrage de l'application
+ #dbtype="sqlite", dbname="DBNAME", dbuser=None, dbpass=None,
+)
+
+# configuration spécifique à la prod
+PROD_CONFIG = dict(
+ # configuration de prod
+ #dbtype="mysql", dbuser="DBNAME", dbpass="",
+)
+
+# profil courant
+PROFILE = None
+
+# profils valides
+PROFILES = dict(
+ prod=PROD_CONFIG,
+)
diff --git a/lib/nulib/templates/webpyapp/config/server.conf b/lib/nulib/templates/webpyapp/config/server.conf
new file mode 100644
index 0000000..e1aeca4
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/config/server.conf
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+# Nom de l'application
+#NAME=webpyapp
+
+# adresse d'écoute
+#HOST=0.0.0.0
+
+# port d'écoute
+#PORT=7080
+
+# faut-il activer le mode debug?
+#DEBUG=1
+
+# Profil de configuration
+#PROFILE=prod
+
+################################################################################
+# Options avancées
+
+# Utilisateur qui fait tourner le service
+#OWNER=root
+
+# Répertoires à rajouter à PYTHONPATH et LD_LIBRARY_PATH
+#SLPATH=/opt/oracle/instantclient
+
+# Environnement virtuel à utiliser. Il est créé s'il n'existe pas
+#VIRTUAL_ENV=/opt/webpython
+
+# Interpréteur python à utiliser. Si VIRTUAL_ENV est défini, c'est cet
+# interpréteur qui est utilisé pour créer l'environnement virtuel.
+#PYTHON=python2.7
diff --git a/lib/nulib/templates/webpyapp/nulib b/lib/nulib/templates/webpyapp/nulib
new file mode 120000
index 0000000..c25bddb
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/nulib
@@ -0,0 +1 @@
+../..
\ No newline at end of file
diff --git a/lib/nulib/templates/webpyapp/python/app/__init__.py b/lib/nulib/templates/webpyapp/python/app/__init__.py
new file mode 100644
index 0000000..08d8771
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/app/__init__.py
@@ -0,0 +1,12 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ 'web', 'Page', 'bs',
+ 'nocache', 'auth', 'defaults',
+ 'reset_session', 'set_session', 'session', 'check_session',
+ 'config',
+)
+
+from nulib.web import web, Page, bs, nocache, auth, defaults
+from nulib.web import reset_session, set_session, session, check_session
+from nulib.web.config_loader import config
diff --git a/lib/nulib/templates/webpyapp/python/app/server.py b/lib/nulib/templates/webpyapp/python/app/server.py
new file mode 100644
index 0000000..5c31057
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/app/server.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('Server',)
+
+import sys, os
+from os import path
+
+BASEDIR = os.environ.get("BASEDIR", None)
+if BASEDIR is None:
+ SCRIPTDIR = path.abspath(path.split(__file__)[0])
+ BASEDIR = path.dirname(path.dirname(SCRIPTDIR))
+sys.path.insert(0, path.join(BASEDIR, 'config'))
+sys.path.insert(1, path.join(BASEDIR, 'python'))
+
+from nulib.paths import mkdirp
+from nulib.web import web, Application
+from app import config
+from pages import *
+
+class Server(Application):
+    """Application server entry point for this project."""
+    # uncomment the following lines to enable sessions
+    #def _new_session(self):
+    #    sdir = path.join(self.basedir, 'var/sessions')
+    #    mkdirp(sdir)
+    #    return web.session.DiskStore(sdir), {}
+
+    def before_start(self):
+        # propagate server identity, base directory and profile to the
+        # shared config module before the application starts
+        super(Server, self).before_start()
+        config.set_appname(self.NAME)
+        config.set_basedir(self.basedir)
+        config.set_profile(self.PROFILE)
+        #if config.migrate:
+        #    from model import data
+        #    data.migration.migrate()
+
+if __name__ == '__main__':
+    Server(BASEDIR).run(sys.argv[1:])
diff --git a/lib/nulib/templates/webpyapp/python/model/__init__.py b/lib/nulib/templates/webpyapp/python/model/__init__.py
new file mode 100644
index 0000000..e59d78a
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/model/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = (
+ #'datactl',
+)
+
+#from .data import datactl
diff --git a/lib/nulib/templates/webpyapp/python/model/data.py b/lib/nulib/templates/webpyapp/python/model/data.py
new file mode 100644
index 0000000..9ea8339
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/model/data.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ('db',)
+
+from os import path
+
+from nulib.web import lazydb, incd, excd, Row, RowCtl, Migration
+
+db = lazydb()
+
+class DataMigration(Migration):
+    """Database migration steps for the application model."""
+    DB = db
+
+    def version0(self):
+        # my(file) is used to access resource files next to this module.
+        # NOTE(review): version0 currently only defines helpers and has no
+        # effect -- presumably a placeholder to be filled in
+        mydir = path.dirname(__file__)
+        my = lambda filename: path.join(mydir, filename)
+migration = DataMigration()
diff --git a/lib/nulib/templates/webpyapp/python/pages/T.py b/lib/nulib/templates/webpyapp/python/pages/T.py
new file mode 100644
index 0000000..d439e33
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/pages/T.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
+from app import *
+
+class T(Page):
+ PREFIX = r'/T'
diff --git a/lib/nulib/templates/webpyapp/python/pages/__init__.py b/lib/nulib/templates/webpyapp/python/pages/__init__.py
new file mode 100644
index 0000000..8864c2b
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/pages/__init__.py
@@ -0,0 +1,6 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
+# à importer en dernier
+from . import main_page
diff --git a/lib/nulib/templates/webpyapp/python/pages/main_page.py b/lib/nulib/templates/webpyapp/python/pages/main_page.py
new file mode 100644
index 0000000..7895744
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/python/pages/main_page.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 mode: python -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+
+__all__ = ()
+
+from app import *
+
+class main(Page):
+ PREFIX = r'/'
diff --git a/lib/nulib/templates/webpyapp/setup-devel.sh b/lib/nulib/templates/webpyapp/setup-devel.sh
new file mode 100755
index 0000000..0a67518
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/setup-devel.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+# Bootstrap a new development project from this template directory.
+# Relies on nulib helpers (uecho, parse_args, read_value, setx, estep, ...).
+source /etc/ulibauto || exit 1
+urequire conf
+
+function display_help() {
+    uecho "$scriptname: initialiser un environnement de développement
+
+USAGE
+    $scriptname [destdir]
+
+OPTIONS
+    -n, --name NAME
+        Spécifier le nom du projet"
+}
+
+name=
+args=(
+    --help '$exit_with display_help'
+    -n:,--name: name=
+)
+parse_args "$@"; set -- "${args[@]}"
+
+destdir="${1:-.}"
+# NOTE(review): -i is passed to read_value only when --name was given --
+# presumably it makes the prompt non-interactive; confirm against nulib
+confopt="${name:+-i}"
+
+read_value $confopt "Veuillez entrer le nom du projet" name "$name"
+
+dest="$destdir/$name"
+[ -d "$dest" ] && die "Le répertoire $(ppath "$dest") existe déjà"
+
+einfo "Vous allez créer un nouveau projet $name dans le répertoire $(ppath "$destdir")"
+ask_yesno "Voulez-vous continuer?" O || die
+
+estep "Copie des fichiers"
+rsync -a --exclude "/$scriptname" "$scriptdir/" "$dest" || die
+
+# resolve the template's nulib symlink, then express it relative to dest
+setx nulibdir=readlink "$scriptdir/nulib"
+setx nulibdir=abspath "$scriptdir/$nulibdir"
+setx nulibdir=relpath "$nulibdir" "$dest"
+
+estep "Correction du lien nulib"
+ln -sfT "$nulibdir" "$dest/nulib"
+
+estep "Maj de la configuration"
+conf_enable "$dest/config/server.conf" NAME="$name"
+
+enote "Fin de la copie. Vous pouvez maintenant configurer $name/config/server.conf"
diff --git a/lib/nulib/templates/webpyapp/startserver b/lib/nulib/templates/webpyapp/startserver
new file mode 100755
index 0000000..c824654
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/startserver
@@ -0,0 +1,44 @@
+#!/bin/bash
+# -*- coding: utf-8 mode: sh -*- vim:sw=4:sts=4:et:ai:si:sta:fenc=utf-8
+scriptdir="$(dirname -- "$0")"; scriptdir="$(cd "$scriptdir"; pwd)"
+
+if [ -n "$NULIBDIR" -a "$NULIBDIR" == "$NULIBINIT" ]; then
+ : # nulib est déjà chargé
+elif [ -f "$scriptdir/nulib/load.sh" ]; then
+ # mode développement
+ source "$scriptdir/nulib/load.sh"
+elif [ -f /etc/nulib.sh ]; then
+ # mode production
+ source /etc/nulib.sh
+else
+ echo "ERROR: impossible de trouver nulib" 1>&2
+ exit 1
+fi
+
+# Environnement
+export BASEDIR="$scriptdir"
+[ -n "$PYTHONPATH" ] && PYTHONPATH=":$PYTHONPATH"
+export PYTHONPATH="$BASEDIR/config:$BASEDIR/python$PYTHONPATH"
+
+# Paramètres
+DEFAULT_PYTHON=python2.7
+
+SLPATH=
+VIRTUAL_ENV=
+PYTHON=
+source "$BASEDIR/config/server.conf"
+if [ -n "$VIRTUAL_ENV" ]; then
+ PYTHON="$VIRTUAL_ENV/bin/python"
+elif [ -z "$PYTHON" ]; then
+ PYTHON="$DEFAULT_PYTHON"
+fi
+
+# Répertoires contenant des librairies Python supplémentaires
+if [ -n "$SLPATH" ]; then
+ export PYTHONPATH="$PYTHONPATH:$SLPATH"
+ [ -n "$LD_LIBRARY_PATH" ] && LD_LIBRARY_PATH="$LD_LIBRARY_PATH:"
+ export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$SLPATH"
+fi
+
+#
+exec "$PYTHON" -m app.server "$@"
diff --git a/lib/nulib/templates/webpyapp/static/bootstrap/T.html b/lib/nulib/templates/webpyapp/static/bootstrap/T.html
new file mode 100644
index 0000000..ea6f71b
--- /dev/null
+++ b/lib/nulib/templates/webpyapp/static/bootstrap/T.html
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+Template
+
+
+