commit 763a401ed1
Merge branch 'master' into core-updates
@@ -1,3 +0,0 @@
-[submodule "nix-upstream"]
-	path = nix-upstream
-	url = https://github.com/NixOS/nix.git
bootstrap
@@ -1,15 +1,5 @@
 #!/bin/sh
-# Import missing source files and create the build system.
+# Create the build system.
 
 set -e -x
-
-top_srcdir="$PWD"
-export top_srcdir
-
-git submodule init
-git submodule update
-
-./nix/sync-with-upstream
-
 exec autoreconf -vfi
@@ -4177,10 +4177,91 @@ tool suite.)
 the ``message of the day''.
 @end deffn
 
-@deffn {Monadic Procedure} nscd-service [#:glibc glibc]
-Return a service that runs libc's name service cache daemon (nscd).
+@cindex name service cache daemon
+@cindex nscd
+@deffn {Monadic Procedure} nscd-service [@var{config}] [#:glibc glibc]
+Return a service that runs libc's name service cache daemon (nscd) with the
+given @var{config}---an @code{<nscd-configuration>} object.
 @end deffn
 
+@defvr {Scheme Variable} %nscd-default-configuration
+This is the default @code{<nscd-configuration>} value (see below) used
+by @code{nscd-service}.  This uses the caches defined by
+@var{%nscd-default-caches}; see below.
+@end defvr
+
+@deftp {Data Type} nscd-configuration
+This is the type representing the name service cache daemon (nscd)
+configuration.
+
+@table @asis
+
+@item @code{log-file} (default: @code{"/var/log/nscd.log"})
+Name of nscd's log file.  This is where debugging output goes when
+@code{debug-level} is strictly positive.
+
+@item @code{debug-level} (default: @code{0})
+Integer denoting the debugging levels.  Higher numbers mean more
+debugging output is logged.
+
+@item @code{caches} (default: @var{%nscd-default-caches})
+List of @code{<nscd-cache>} objects denoting things to be cached; see
+below.
+
+@end table
+@end deftp
+
+@deftp {Data Type} nscd-cache
+Data type representing a cache database of nscd and its parameters.
+
+@table @asis
+
+@item @code{database}
+This is a symbol representing the name of the database to be cached.
+Valid values are @code{passwd}, @code{group}, @code{hosts}, and
+@code{services}, which designate the corresponding NSS database
+(@pxref{NSS Basics,,, libc, The GNU C Library Reference Manual}).
+
+@item @code{positive-time-to-live}
+@itemx @code{negative-time-to-live} (default: @code{20})
+A number representing the number of seconds during which a positive or
+negative lookup result remains in cache.
+
+@item @code{check-files?} (default: @code{#t})
+Whether to check for updates of the files corresponding to
+@var{database}.
+
+For instance, when @var{database} is @code{hosts}, setting this flag
+instructs nscd to check for updates in @file{/etc/hosts} and to take
+them into account.
+
+@item @code{persistent?} (default: @code{#t})
+Whether the cache should be stored persistently on disk.
+
+@item @code{shared?} (default: @code{#t})
+Whether the cache should be shared among users.
+
+@item @code{max-database-size} (default: 32@tie{}MiB)
+Maximum size in bytes of the database cache.
+
+@c XXX: 'suggested-size' and 'auto-propagate?' seem to be expert
+@c settings, so leave them out.
+
+@end table
+@end deftp
+
+@defvr {Scheme Variable} %nscd-default-caches
+List of @code{<nscd-cache>} objects used by default by
+@code{nscd-configuration} (see above.)
+
+It enables persistent and aggressive caching of service and host name
+lookups.  The latter provides better host name lookup performance,
+resilience in the face of unreliable name servers, and also better
+privacy---often the result of host name lookups is in local cache, so
+external name servers do not even need to be queried.
+@end defvr
+
+
 @deffn {Monadic Procedure} syslog-service
 Return a service that runs @code{syslogd} with reasonable default
 settings.
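
For illustration only (this block is not part of the commit): a minimal sketch of how the nscd service documented in the hunk above might be configured.  The record constructors and field names follow that documentation; the concrete cache values and the surrounding list of system services are assumptions.

;; Hypothetical usage sketch -- constructors and fields are taken from the
;; documentation above; the particular values are illustrative only.
(define %my-nscd-config
  (nscd-configuration
   (debug-level 1)                     ; write some debugging output to log-file
   (caches (list (nscd-cache (database 'hosts)
                             (positive-time-to-live (* 12 3600))
                             (negative-time-to-live 20)
                             (persistent? #t)
                             (shared? #t))))))

;; ...which could then be passed to the documented procedure, e.g. among the
;; system's services:
;;   (nscd-service %my-nscd-config)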
@@ -96,6 +96,7 @@ GNU_SYSTEM_MODULES = \
   gnu/packages/freeipmi.scm \
   gnu/packages/ftp.scm \
   gnu/packages/fribidi.scm \
+  gnu/packages/game-development.scm \
   gnu/packages/games.scm \
   gnu/packages/gawk.scm \
   gnu/packages/gcal.scm \

@@ -377,6 +378,15 @@ dist_patch_DATA = \
   gnu/packages/patches/guix-test-networking.patch \
   gnu/packages/patches/gtkglext-disable-disable-deprecated.patch \
   gnu/packages/patches/hop-bigloo-4.0b.patch \
+  gnu/packages/patches/icecat-CVE-2014-1587-bug-1042567.patch \
+  gnu/packages/patches/icecat-CVE-2014-1587-bug-1072847.patch \
+  gnu/packages/patches/icecat-CVE-2014-1587-bug-1079729.patch \
+  gnu/packages/patches/icecat-CVE-2014-1587-bug-1080312.patch \
+  gnu/packages/patches/icecat-CVE-2014-1587-bug-1089207.patch \
+  gnu/packages/patches/icecat-CVE-2014-1590.patch \
+  gnu/packages/patches/icecat-CVE-2014-1592.patch \
+  gnu/packages/patches/icecat-CVE-2014-1593.patch \
+  gnu/packages/patches/icecat-CVE-2014-1594.patch \
   gnu/packages/patches/inkscape-stray-comma.patch \
   gnu/packages/patches/jbig2dec-ignore-testtest.patch \
   gnu/packages/patches/kmod-module-directory.patch \

@@ -450,6 +460,7 @@ dist_patch_DATA = \
   gnu/packages/patches/wmctrl-64-fix.patch \
   gnu/packages/patches/xf86-input-synaptics-glibc-2.20.patch \
   gnu/packages/patches/xf86-video-openchrome-includes.patch \
+  gnu/packages/patches/xfce4-panel-plugins.patch \
   gnu/packages/patches/xmodmap-asprintf.patch
 
 bootstrapdir = $(guilemoduledir)/gnu/packages/bootstrap
@@ -105,24 +105,29 @@
     (append environment `((,%distro-root-directory . "gnu/packages"))))))
 
 (define* (scheme-files directory)
-  "Return the list of Scheme files found under DIRECTORY."
-  (file-system-fold (const #t)                     ; enter?
-                    (lambda (path stat result)     ; leaf
-                      (if (string-suffix? ".scm" path)
-                          (cons path result)
-                          result))
-                    (lambda (path stat result)     ; down
-                      result)
-                    (lambda (path stat result)     ; up
-                      result)
-                    (const #f)                     ; skip
-                    (lambda (path stat errno result)
-                      (warning (_ "cannot access `~a': ~a~%")
-                               path (strerror errno))
-                      result)
-                    '()
-                    directory
-                    stat))
+  "Return the list of Scheme files found under DIRECTORY, recursively.  The
+returned list is sorted in alphabetical order."
+
+  ;; Sort entries so that 'fold-packages' works in a deterministic fashion
+  ;; regardless of details of the underlying file system.
+  (sort (file-system-fold (const #t)                     ; enter?
+                          (lambda (path stat result)     ; leaf
+                            (if (string-suffix? ".scm" path)
+                                (cons path result)
+                                result))
+                          (lambda (path stat result)     ; down
+                            result)
+                          (lambda (path stat result)     ; up
+                            result)
+                          (const #f)                     ; skip
+                          (lambda (path stat errno result)
+                            (warning (_ "cannot access `~a': ~a~%")
+                                     path (strerror errno))
+                            result)
+                          '()
+                          directory
+                          stat)
+        string<?))
 
 (define file-name->module-name
   (let ((not-slash (char-set-complement (char-set #\/))))
@@ -28,6 +28,113 @@
   #:use-module (gnu packages pkg-config)
   #:use-module (gnu packages python))
 
+(define-public bedtools
+  (package
+    (name "bedtools")
+    (version "2.22.0")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "https://github.com/arq5x/bedtools2/archive/v"
+                                  version ".tar.gz"))
+              (sha256
+               (base32
+                "16aq0w3dmbd0853j32xk9jin4vb6v6fgakfyvrsmsjizzbn3fpfl"))))
+    (build-system gnu-build-system)
+    (native-inputs `(("python" ,python-2)))
+    (inputs `(("samtools" ,samtools)
+              ("zlib" ,zlib)))
+    (arguments
+     '(#:test-target "test"
+       #:phases
+       (alist-cons-after
+        'unpack 'patch-makefile-SHELL-definition
+        (lambda _
+          ;; patch-makefile-SHELL cannot be used here as it does not
+          ;; yet patch definitions with `:='.  Since changes to
+          ;; patch-makefile-SHELL result in a full rebuild, features
+          ;; of patch-makefile-SHELL are reimplemented here.
+          (substitute* "Makefile"
+            (("^SHELL := .*$") (string-append "SHELL := " (which "bash") " -e \n"))))
+        (alist-delete
+         'configure
+         (alist-replace
+          'install
+          (lambda* (#:key outputs #:allow-other-keys)
+            (let ((bin (string-append (assoc-ref outputs "out") "/bin/")))
+              (mkdir-p bin)
+              (for-each (lambda (file)
+                          (copy-file file (string-append bin (basename file))))
+                        (find-files "bin" ".*"))))
+          %standard-phases)))))
+    (home-page "https://github.com/arq5x/bedtools2")
+    (synopsis "Tools for genome analysis and arithmetic")
+    (description
+     "Collectively, the bedtools utilities are a swiss-army knife of tools for
+a wide-range of genomics analysis tasks.  The most widely-used tools enable
+genome arithmetic: that is, set theory on the genome.  For example, bedtools
+allows one to intersect, merge, count, complement, and shuffle genomic
+intervals from multiple files in widely-used genomic file formats such as BAM,
+BED, GFF/GTF, VCF.")
+    (license license:gpl2)))
+
+(define-public bowtie
+  (package
+    (name "bowtie")
+    (version "2.2.4")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "https://github.com/BenLangmead/bowtie2/archive/v"
+                                  version ".tar.gz"))
+              (sha256
+               (base32
+                "15dnbqippwvhyh9zqjhaxkabk7lm1xbh1nvar1x4b5kwm117zijn"))
+              (modules '((guix build utils)))
+              (snippet
+               '(substitute* "Makefile"
+                  (("^CC = .*$") "CC = gcc")
+                  (("^CPP = .*$") "CPP = g++")
+                  ;; replace BUILD_HOST and BUILD_TIME for deterministic build
+                  (("-DBUILD_HOST=.*") "-DBUILD_HOST=\"\\\"guix\\\"\"")
+                  (("-DBUILD_TIME=.*") "-DBUILD_TIME=\"\\\"0\\\"\"")))))
+    (build-system gnu-build-system)
+    (inputs `(("perl" ,perl)
+              ("perl-clone" ,perl-clone)
+              ("perl-test-deep" ,perl-test-deep)
+              ("perl-test-simple" ,perl-test-simple)
+              ("python" ,python-2)))
+    (arguments
+     '(#:make-flags '("allall")
+       #:phases
+       (alist-delete
+        'configure
+        (alist-replace
+         'install
+         (lambda* (#:key outputs #:allow-other-keys)
+           (let ((bin (string-append (assoc-ref outputs "out") "/bin/")))
+             (mkdir-p bin)
+             (for-each (lambda (file)
+                         (copy-file file (string-append bin file)))
+                       (find-files "." "bowtie2.*"))))
+         (alist-replace
+          'check
+          (lambda* (#:key outputs #:allow-other-keys)
+            (system* "perl"
+                     "scripts/test/simple_tests.pl"
+                     "--bowtie2=./bowtie2"
+                     "--bowtie2-build=./bowtie2-build"))
+          %standard-phases)))))
+    (home-page "http://bowtie-bio.sourceforge.net/bowtie2/index.shtml")
+    (synopsis "Fast and sensitive nucleotide sequence read aligner")
+    (description
+     "Bowtie 2 is a fast and memory-efficient tool for aligning sequencing
+reads to long reference sequences.  It is particularly good at aligning reads
+of about 50 up to 100s or 1,000s of characters, and particularly good at
+aligning to relatively long (e.g. mammalian) genomes.  Bowtie 2 indexes the
+genome with an FM Index to keep its memory footprint small: for the human
+genome, its memory footprint is typically around 3.2 GB.  Bowtie 2 supports
+gapped, local, and paired-end alignment modes.")
+    (license license:gpl3+)))
+
 (define-public samtools
   (package
     (name "samtools")
@@ -43,7 +150,14 @@
               "1y5p2hs4gif891b4ik20275a8xf3qrr1zh9wpysp4g8m0g1jckf2"))))
     (build-system gnu-build-system)
     (arguments
-     '(#:make-flags (list (string-append "prefix=" (assoc-ref %outputs "out")))
+     `(;; There are 87 test failures when building on non-64-bit architectures
+       ;; due to invalid test data.  This has since been fixed upstream (see
+       ;; <https://github.com/samtools/samtools/pull/307>), but as there has
+       ;; not been a new release we disable the tests for all non-64-bit
+       ;; systems.
+       #:tests? ,(string=? (or (%current-system) (%current-target-system))
+                           "x86_64-linux")
+       #:make-flags (list (string-append "prefix=" (assoc-ref %outputs "out")))
       #:phases
       (alist-cons-after
        'unpack
@@ -70,6 +70,26 @@ independent of the input data and can be reduced, if necessary, at some cost
 in compression.")
    (license license:zlib)))
 
+(define-public fastjar
+  (package
+    (name "fastjar")
+    (version "0.98")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "mirror://savannah/fastjar/fastjar-"
+                                  version ".tar.gz"))
+              (sha256
+               (base32
+                "0iginbz2m15hcsa3x4y7v3mhk54gr1r7m3ghx0pg4n46vv2snmpi"))))
+    (build-system gnu-build-system)
+    (inputs `(("zlib" ,zlib)))
+    (home-page "http://savannah.nongnu.org/projects/fastjar")
+    (synopsis "Replacement for Sun's 'jar' utility")
+    (description
+     "FastJar is an attempt to create a much faster replacement for Sun's 'jar'
+utility.  Instead of being written in Java, FastJar is written in C.")
+    (license license:gpl2+)))
+
 (define-public gzip
   (package
    (name "gzip")
@@ -0,0 +1,48 @@
;;; GNU Guix --- Functional package management for GNU
;;; Copyright © 2014 Tomáš Čech <sleep_walker@suse.cz>
;;;
;;; This file is part of GNU Guix.
;;;
;;; GNU Guix is free software; you can redistribute it and/or modify it
;;; under the terms of the GNU General Public License as published by
;;; the Free Software Foundation; either version 3 of the License, or (at
;;; your option) any later version.
;;;
;;; GNU Guix is distributed in the hope that it will be useful, but
;;; WITHOUT ANY WARRANTY; without even the implied warranty of
;;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
;;; GNU General Public License for more details.
;;;
;;; You should have received a copy of the GNU General Public License
;;; along with GNU Guix.  If not, see <http://www.gnu.org/licenses/>.

(define-module (gnu packages game-development)
  #:use-module (guix licenses)
  #:use-module (guix packages)
  #:use-module (guix download)
  #:use-module (guix build-system cmake)
  #:use-module (gnu packages))

(define-public bullet
  (package
    (name "bullet")
    (version "2.82-r2704")
    (source (origin
              (method url-fetch)
              (uri (string-append "https://bullet.googlecode.com/files/bullet-"
                                  version ".tgz"))
              (sha256
               (base32
                "1lnfksxa9b1slyfcxys313ymsllvbsnxh9np06azkbgpfvmwkr37"))))
    (build-system cmake-build-system)
    (arguments '(#:tests? #f ; no 'test' target
                 #:configure-flags (list
                                    (string-append
                                     "-DCMAKE_CXX_FLAGS=-fPIC "
                                     (or (getenv "CXXFLAGS") "")))))
    (home-page "http://bulletphysics.org/")
    (synopsis "3D physics engine library")
    (description
     "Bullet is a physics engine library usable for collision detection.  It
is used in some video games and movies.")
    (license zlib)))
@@ -1,5 +1,6 @@
 ;;; GNU Guix --- Functional package management for GNU
 ;;; Copyright © 2013 Andreas Enge <andreas@enge.fr>
+;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
 ;;;
 ;;; This file is part of GNU Guix.
 ;;;

@@ -33,14 +34,14 @@
 (define-public lcms
   (package
    (name "lcms")
-   (version "2.4")
+   (version "2.6")
    (source (origin
            (method url-fetch)
            (uri (string-append
                  "http://downloads.sourceforge.net/project/lcms/lcms/"
                  version "/lcms2-" version ".tar.gz"))
            (sha256 (base32
-                    "1s1ppvqaydf2yqc72mw6zfviwxccb311a6hrbi802sgjxw84sl9a"))))
+                    "1c8lgq8gfs3nyplvbx9k8wzfj6r2bqi3f611vb1m8z3476454wji"))))
    (build-system gnu-build-system)
    (inputs `(("libjpeg-8" ,libjpeg-8)
              ("libtiff" ,libtiff)

@@ -118,13 +119,13 @@ printing, and psresize, for adjusting page sizes.")
 (define-public ghostscript
   (package
    (name "ghostscript")
-   (version "9.06.0")
+   (version "9.14.0")
    (source (origin
            (method url-fetch)
            (uri (string-append "mirror://gnu/ghostscript/gnu-ghostscript-"
                                version ".tar.xz"))
            (sha256 (base32
-                    "0bcg2203p7cm0f53f3s883xhj2c91xnaxakj2cy7kcdknfxplvs4"))))
+                    "0q4jj41p0qbr4mgcc9q78f5zs8cm1g57wgryhsm2yq4lfslm3ib1"))))
    (build-system gnu-build-system)
    (inputs `(("freetype" ,freetype)
              ("lcms" ,lcms)

@@ -160,7 +161,7 @@ printing, and psresize, for adjusting page sizes.")
 file format. It also includes a C library that implements the graphics
 capabilities of the PostScript language. It supports a wide variety of
 output file formats and printers.")
-   (license license:gpl3+)
+   (license license:agpl3+)
    (home-page "http://www.gnu.org/software/ghostscript/")))
 
 (define-public gs-fonts
@@ -47,7 +47,8 @@
   #:use-module (gnu packages gl)
   #:use-module (gnu packages compression)
   #:use-module (gnu packages xorg)
-  #:use-module (gnu packages xdisorg))
+  #:use-module (gnu packages xdisorg)
+  #:use-module (gnu packages ncurses))
 
 (define-public brasero
   (package

@@ -1292,3 +1293,89 @@ engineering.")
   (description
    "The default GNOME 3 themes (Adwaita and some accessibility themes).")
   (license license:lgpl2.1+)))
+
+(define-public vala
+  (package
+    (name "vala")
+    (version "0.26.1")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "mirror://gnome/sources/" name "/"
+                                  (version-major+minor version) "/"
+                                  name "-" version ".tar.xz"))
+              (sha256
+               (base32
+                "0swyym2papln0f62ah05dpvq3vv6fssap26jq2zqp9dkkaqsn1w4"))))
+    (build-system gnu-build-system)
+    (arguments '(#:make-flags '("CC=gcc")))
+    (native-inputs
+     `(("pkg-config" ,pkg-config)
+       ("flex" ,flex)
+       ("bison" ,bison)
+       ("xsltproc" ,libxslt)
+       ("dbus" ,dbus)                                     ; for dbus tests
+       ("gobject-introspection" ,gobject-introspection))) ; for gir tests
+    (propagated-inputs
+     `(("glib" ,glib))) ; required by libvala-0.26.pc
+    (home-page "http://live.gnome.org/Vala/")
+    (synopsis "Compiler for the GObject type system")
+    (description
+     "Vala is a programming language that aims to bring modern programming
+language features to GNOME developers without imposing any additional runtime
+requirements and without using a different ABI compared to applications and
+libraries written in C.")
+    (license license:lgpl2.1+)))
+
+(define-public vte
+  (package
+    (name "vte")
+    (version "0.38.2")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "mirror://gnome/sources/" name "/"
+                                  (version-major+minor version) "/"
+                                  name "-" version ".tar.xz"))
+              (sha256
+               (base32
+                "1rbxrigff9yszbgdw0gw4c2saz4d1hbbpz21phzxx14w49wvmnmj"))))
+    (build-system gnu-build-system)
+    (native-inputs
+     `(("pkg-config" ,pkg-config)
+       ("intltool" ,intltool)
+       ("vala" ,vala)
+       ("gobject-introspection" ,gobject-introspection)
+       ("glib" ,glib "bin") ; for glib-genmarshal, etc.
+       ("xmllint" ,libxml2)))
+    (propagated-inputs
+     `(("gtk+" ,gtk+))) ; required by libvte-2.91.pc
+    (home-page "http://www.gnome.org/")
+    (synopsis "Virtual Terminal Emulator")
+    (description
+     "VTE is a library (libvte) implementing a terminal emulator widget for
+GTK+, and a minimal sample application (vte) using that.  Vte is mainly used in
+gnome-terminal, but can also be used to embed a console/terminal in games,
+editors, IDEs, etc.")
+    (license license:lgpl2.1+)))
+
+;; stable version for gtk2, required by xfce4-terminal.
+(define-public vte/gtk+-2
+  (package (inherit vte)
+    (name "vte")
+    (version "0.28.2")
+    (source (origin
+              (method url-fetch)
+              (uri (string-append "mirror://gnome/sources/" name "/"
+                                  (version-major+minor version) "/"
+                                  name "-" version ".tar.xz"))
+              (sha256
+               (base32
+                "1bmhahkf8wdsra9whd3k5l5z4rv7r58ksr8mshzajgq2ma0hpkw6"))))
+    (arguments
+     '(#:configure-flags '("--disable-python")))
+    (native-inputs
+     `(("pkg-config" ,pkg-config)
+       ("intltool" ,intltool)
+       ("glib" ,glib "bin"))) ; for glib-genmarshal, etc.
+    (propagated-inputs
+     `(("gtk+" ,gtk+-2) ; required by libvte.pc
+       ("ncurses" ,ncurses))))) ; required by libvte.la
@@ -1,6 +1,6 @@
 ;;; GNU Guix --- Functional package management for GNU
 ;;; Copyright © 2013 Andreas Enge <andreas@enge.fr>
-;;; Copyright © 2013 Ludovic Courtès <ludo@gnu.org>
+;;; Copyright © 2013, 2014 Ludovic Courtès <ludo@gnu.org>
 ;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
 ;;;
 ;;; This file is part of GNU Guix.

@@ -53,7 +53,17 @@
             version "/" name "-" version ".tar.xz"))
       (sha256
        (base32
-        "02r9klfc0z26w270inq652249hq0wfzvwhzvwmk0n8v8nzkk5idh"))))
+        "02r9klfc0z26w270inq652249hq0wfzvwhzvwmk0n8v8nzkk5idh"))
+      (patches (map search-patch
+                    '("icecat-CVE-2014-1587-bug-1042567.patch"
+                      "icecat-CVE-2014-1587-bug-1072847.patch"
+                      "icecat-CVE-2014-1587-bug-1079729.patch"
+                      "icecat-CVE-2014-1587-bug-1080312.patch"
+                      "icecat-CVE-2014-1587-bug-1089207.patch"
+                      "icecat-CVE-2014-1590.patch"
+                      "icecat-CVE-2014-1592.patch"
+                      "icecat-CVE-2014-1593.patch"
+                      "icecat-CVE-2014-1594.patch")))))
    (build-system gnu-build-system)
    (inputs
     `(("alsa-lib" ,alsa-lib)

@@ -90,6 +100,7 @@
                           "--disable-debug"
                           "--disable-debug-symbols"
 
+                          "--enable-pulseaudio"
                           "--disable-webrtc" ; webrtc fails to build
 
                           "--with-system-zlib"
@@ -1,5 +1,6 @@
 ;;; GNU Guix --- Functional package management for GNU
 ;;; Copyright © 2013 Andreas Enge <andreas@enge.fr>
+;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
 ;;;
 ;;; This file is part of GNU Guix.
 ;;;

@@ -31,20 +32,21 @@
 (define-public groff
   (package
    (name "groff")
-   (version "1.22.2")
+   (version "1.22.3")
    (source (origin
            (method url-fetch)
            (uri (string-append "mirror://gnu/groff/groff-" version
                                ".tar.gz"))
            (sha256 (base32
-                    "0xi07nhj5vdgax37rj25mwxzdmsz1ifx50hjgc6hqbkpqkd6821q"))))
+                    "1998v2kcs288d3y7kfxpvl369nqi06zbbvjzafyvyl3pr7bajj1s"))))
    (build-system gnu-build-system)
    (inputs `(("ghostscript" ,ghostscript)
              ("netpbm" ,netpbm)))
    (native-inputs `(("bison" ,bison)
                     ("perl" ,perl)
                     ("psutils" ,psutils)
                     ("texinfo" ,texinfo)))
+   (arguments '(#:parallel-build? #f)) ; parallel build fails
    (synopsis "Typesetting from plain text mixed with formatting commands")
    (description
     "Groff is a typesetting package that reads plain text and produces
@@ -192,7 +192,7 @@ for SYSTEM, or #f if there is no configuration for SYSTEM."
       #f)))
 
 (define-public linux-libre
-  (let* ((version "3.18")
+  (let* ((version "3.18.1")
          (build-phase
           '(lambda* (#:key system inputs #:allow-other-keys #:rest args)
              ;; Apply the neat patch.

@@ -265,7 +265,7 @@ for SYSTEM, or #f if there is no configuration for SYSTEM."
              (uri (linux-libre-urls version))
              (sha256
               (base32
-               "1kv03bhls9rya4sg3qixyjirc79pn2g5bcwldcj7hs4apa77sd0g"))))
+               "0yj6sz9cvsbhrc9jksr4wgg63crzmqh65903l7bq9k0gz1f3x1s8"))))
     (build-system gnu-build-system)
     (native-inputs `(("perl" ,perl)
                      ("bc" ,bc)
@@ -1,5 +1,6 @@
 ;;; GNU Guix --- Functional package management for GNU
-;;; Copyright 2014 John Darrington <jmd@gnu.org>
+;;; Copyright © 2014 John Darrington <jmd@gnu.org>
+;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
 ;;;
 ;;; This file is part of GNU Guix.
 ;;;

@@ -20,8 +21,11 @@
   #:use-module (gnu packages)
   #:use-module (gnu packages which)
   #:use-module (gnu packages linux)
-  #:use-module (guix licenses)
+  #:use-module (gnu packages pkg-config)
+  #:use-module (gnu packages openssl)
+  #:use-module ((guix licenses) #:prefix l:)
   #:use-module (guix packages)
+  #:use-module (guix utils)
   #:use-module (guix download)
   #:use-module (guix build-system gnu)
   #:use-module (srfi srfi-1))

@@ -29,29 +33,31 @@
 (define-public ntp
   (package
    (name "ntp")
-   (version "4.2.6p5")
+   (version "4.2.8")
    (source (origin
            (method url-fetch)
            (uri (string-append
-                 "http://www.eecis.udel.edu/~ntp/ntp_spool/ntp4/ntp-"
-                 (string-join (take (string-split version #\.) 2) ".")
+                 "http://archive.ntp.org/ntp4/ntp-"
+                 (version-major+minor version)
                  "/ntp-" version ".tar.gz"))
            (sha256
            (base32
-            "077r69a41hasl8zf5c44km7cqgfhrkaj6a4jnr75j7nkz5qq7ayn"))))
+            "1vnqa1542d01xmlkw8f3rq57y360b2j7yxkkg9b11955nvw0v4if"))))
-   (native-inputs `(("which" ,which)))
+   (native-inputs `(("which" ,which)
+                    ("pkg-config" ,pkg-config)))
    (inputs
-    ;; Build with POSIX capabilities support on GNU/Linux.  This allows 'ntpd'
-    ;; to run as non-root (when invoked with '-u'.)
-    (if (string-suffix? "-linux"
-                        (or (%current-target-system) (%current-system)))
-        `(("libcap" ,libcap))
-        '()))
+    `(("openssl" ,openssl)
+      ;; Build with POSIX capabilities support on GNU/Linux.  This allows 'ntpd'
+      ;; to run as non-root (when invoked with '-u'.)
+      ,@(if (string-suffix? "-linux"
+                            (or (%current-target-system) (%current-system)))
+            `(("libcap" ,libcap))
+            '())))
    (build-system gnu-build-system)
    (synopsis "Real time clock synchonization system")
    (description "NTP is a system designed to synchronize the clocks of
 computers over a network.")
-   (license (x11-style
+   (license (l:x11-style
             "http://www.eecis.udel.edu/~mills/ntp/html/copyright.html"
             "A non-copyleft free licence from the University of Delaware"))
    (home-page "http://www.ntp.org")))
@@ -0,0 +1,30 @@
commit 60529fc02cf10482d8fecd699eea271ddc22bcb9
Author: Jason Orendorff <jorendorff@mozilla.com>
Date:   Thu Aug 28 15:43:57 2014 -0500

    Bug 1042567 - Reflect JSPropertyOp properties more consistently as data properties. r=efaust, a=lmandel

Modified js/src/jsobj.cpp
diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp
index 2745509..ad336f3 100644
--- a/js/src/jsobj.cpp
+++ b/js/src/jsobj.cpp
@@ -235,11 +235,18 @@ js::GetOwnPropertyDescriptor(JSContext *cx, HandleObject obj, HandleId id,
     if (pobj->isNative()) {
         desc.setAttributes(GetShapeAttributes(pobj, shape));
         if (desc.hasGetterOrSetterObject()) {
+            MOZ_ASSERT(desc.isShared());
             doGet = false;
             if (desc.hasGetterObject())
                 desc.setGetterObject(shape->getterObject());
             if (desc.hasSetterObject())
                 desc.setSetterObject(shape->setterObject());
+        } else {
+            // This is either a straight-up data property or (rarely) a
+            // property with a JSPropertyOp getter/setter. The latter must be
+            // reported to the caller as a plain data property, so don't
+            // populate desc.getter/setter, and mask away the SHARED bit.
+            desc.attributesRef() &= ~JSPROP_SHARED;
         }
     } else {
         if (!JSObject::getGenericAttributes(cx, pobj, id, &desc.attributesRef()))
@@ -0,0 +1,19 @@
commit 5d91f3b10f999e852e0392470198bd6aefc87e1e
Author: Jeff Muizelaar <jmuizelaar@mozilla.com>
Date:   Tue Oct 28 10:08:25 2014 -0400

    Bug 1072847 - Initialize mSurface. r=BenWa, a=bkerensa

Modified gfx/2d/DrawTargetCairo.cpp
diff --git a/gfx/2d/DrawTargetCairo.cpp b/gfx/2d/DrawTargetCairo.cpp
index 48c2c73..78d9e4f 100644
--- a/gfx/2d/DrawTargetCairo.cpp
+++ b/gfx/2d/DrawTargetCairo.cpp
@@ -353,6 +353,7 @@ NeedIntermediateSurface(const Pattern& aPattern, const DrawOptions& aOptions)
 
 DrawTargetCairo::DrawTargetCairo()
   : mContext(nullptr)
+  , mSurface(nullptr)
   , mLockedBits(nullptr)
 {
 }
@ -0,0 +1,191 @@
|
||||||
|
commit 5de6730cc26744b9efcf4d4adb4a4c45023ef8a0
|
||||||
|
Author: Randell Jesup <rjesup@jesup.org>
|
||||||
|
Date: Tue Oct 28 11:06:00 2014 -0400
|
||||||
|
|
||||||
|
Bug 1079729: Fix handling of increasing number of SCTP channels used by DataChannels r=tuexen a=lsblakk
|
||||||
|
|
||||||
|
Modified media/webrtc/signaling/src/sipcc/core/gsm/h/fsm.h
|
||||||
|
diff --git a/media/webrtc/signaling/src/sipcc/core/gsm/h/fsm.h b/media/webrtc/signaling/src/sipcc/core/gsm/h/fsm.h
|
||||||
|
index ba8e1ff..8d964f1 100755
|
||||||
|
--- a/media/webrtc/signaling/src/sipcc/core/gsm/h/fsm.h
|
||||||
|
+++ b/media/webrtc/signaling/src/sipcc/core/gsm/h/fsm.h
|
||||||
|
@@ -225,7 +225,7 @@ typedef struct fsmdef_media_t_ {
|
||||||
|
/*
|
||||||
|
* Data Channel properties
|
||||||
|
*/
|
||||||
|
-#define WEBRTC_DATACHANNEL_STREAMS_DEFAULT 16
|
||||||
|
+#define WEBRTC_DATACHANNEL_STREAMS_DEFAULT 256
|
||||||
|
uint32 datachannel_streams;
|
||||||
|
char datachannel_protocol[SDP_MAX_STRING_LEN + 1];
|
||||||
|
|
||||||
|
Modified netwerk/sctp/datachannel/DataChannel.cpp
|
||||||
|
diff --git a/netwerk/sctp/datachannel/DataChannel.cpp b/netwerk/sctp/datachannel/DataChannel.cpp
|
||||||
|
index 414e3db..a00d938 100644
|
||||||
|
--- a/netwerk/sctp/datachannel/DataChannel.cpp
|
||||||
|
+++ b/netwerk/sctp/datachannel/DataChannel.cpp
|
||||||
|
@@ -910,10 +910,12 @@ DataChannelConnection::RequestMoreStreams(int32_t aNeeded)
|
||||||
|
uint32_t outStreamsNeeded;
|
||||||
|
socklen_t len;
|
||||||
|
|
||||||
|
- if (aNeeded + mStreams.Length() > MAX_NUM_STREAMS)
|
||||||
|
+ if (aNeeded + mStreams.Length() > MAX_NUM_STREAMS) {
|
||||||
|
aNeeded = MAX_NUM_STREAMS - mStreams.Length();
|
||||||
|
- if (aNeeded <= 0)
|
||||||
|
+ }
|
||||||
|
+ if (aNeeded <= 0) {
|
||||||
|
return false;
|
||||||
|
+ }
|
||||||
|
|
||||||
|
len = (socklen_t)sizeof(struct sctp_status);
|
||||||
|
if (usrsctp_getsockopt(mMasterSocket, IPPROTO_SCTP, SCTP_STATUS, &status, &len) < 0) {
|
||||||
|
@@ -922,19 +924,25 @@ DataChannelConnection::RequestMoreStreams(int32_t aNeeded)
|
||||||
|
}
|
||||||
|
outStreamsNeeded = aNeeded; // number to add
|
||||||
|
|
||||||
|
- memset(&sas, 0, sizeof(struct sctp_add_streams));
|
||||||
|
+ // Note: if multiple channel opens happen when we don't have enough space,
|
||||||
|
+ // we'll call RequestMoreStreams() multiple times
|
||||||
|
+ memset(&sas, 0, sizeof(sas));
|
||||||
|
sas.sas_instrms = 0;
|
||||||
|
sas.sas_outstrms = (uint16_t)outStreamsNeeded; /* XXX error handling */
|
||||||
|
// Doesn't block, we get an event when it succeeds or fails
|
||||||
|
if (usrsctp_setsockopt(mMasterSocket, IPPROTO_SCTP, SCTP_ADD_STREAMS, &sas,
|
||||||
|
(socklen_t) sizeof(struct sctp_add_streams)) < 0) {
|
||||||
|
- if (errno == EALREADY)
|
||||||
|
+ if (errno == EALREADY) {
|
||||||
|
+ LOG(("Already have %u output streams", outStreamsNeeded));
|
||||||
|
return true;
|
||||||
|
+ }
|
||||||
|
|
||||||
|
LOG(("***failed: setsockopt ADD errno=%d", errno));
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
LOG(("Requested %u more streams", outStreamsNeeded));
|
||||||
|
+ // We add to mStreams when we get a SCTP_STREAM_CHANGE_EVENT and the
|
||||||
|
+ // values are larger than mStreams.Length()
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -1050,6 +1058,13 @@ DataChannelConnection::SendDeferredMessages()
|
||||||
|
channel->mFlags & DATA_CHANNEL_FLAGS_OUT_OF_ORDER_ALLOWED,
|
||||||
|
channel->mPrPolicy, channel->mPrValue)) {
|
||||||
|
channel->mFlags &= ~DATA_CHANNEL_FLAGS_SEND_REQ;
|
||||||
|
+
|
||||||
|
+ channel->mState = OPEN;
|
||||||
|
+ channel->mReady = true;
|
||||||
|
+ LOG(("%s: sending ON_CHANNEL_OPEN for %p", __FUNCTION__, channel.get()));
|
||||||
|
+ NS_DispatchToMainThread(new DataChannelOnMessageAvailable(
|
||||||
|
+ DataChannelOnMessageAvailable::ON_CHANNEL_OPEN, this,
|
||||||
|
+ channel));
|
||||||
|
sent = true;
|
||||||
|
} else {
|
||||||
|
if (errno == EAGAIN || errno == EWOULDBLOCK) {
|
||||||
|
@@ -1177,6 +1192,7 @@ DataChannelConnection::HandleOpenRequestMessage(const struct rtcweb_datachannel_
|
||||||
|
prPolicy = SCTP_PR_SCTP_TTL;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
+ LOG(("Unknown channel type", req->channel_type));
|
||||||
|
/* XXX error handling */
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
@@ -1203,6 +1219,10 @@ DataChannelConnection::HandleOpenRequestMessage(const struct rtcweb_datachannel_
|
||||||
|
}
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
+ if (stream >= mStreams.Length()) {
|
||||||
|
+ LOG(("%s: stream %u out of bounds (%u)", __FUNCTION__, stream, mStreams.Length()));
|
||||||
|
+ return;
|
||||||
|
+ }
|
||||||
|
|
||||||
|
nsCString label(nsDependentCSubstring(&req->label[0], ntohs(req->label_length)));
|
||||||
|
nsCString protocol(nsDependentCSubstring(&req->label[ntohs(req->label_length)],
|
||||||
|
@@ -1220,8 +1240,8 @@ DataChannelConnection::HandleOpenRequestMessage(const struct rtcweb_datachannel_
|
||||||
|
|
||||||
|
channel->mState = DataChannel::WAITING_TO_OPEN;
|
||||||
|
|
||||||
|
- LOG(("%s: sending ON_CHANNEL_CREATED for %s/%s: %u", __FUNCTION__,
|
||||||
|
- channel->mLabel.get(), channel->mProtocol.get(), stream));
|
||||||
|
+ LOG(("%s: sending ON_CHANNEL_CREATED for %s/%s: %u (state %u)", __FUNCTION__,
|
||||||
|
+ channel->mLabel.get(), channel->mProtocol.get(), stream, channel->mState));
|
||||||
|
NS_DispatchToMainThread(new DataChannelOnMessageAvailable(
|
||||||
|
DataChannelOnMessageAvailable::ON_CHANNEL_CREATED,
|
||||||
|
this, channel));
|
||||||
|
@@ -1739,13 +1759,14 @@ DataChannelConnection::HandleStreamResetEvent(const struct sctp_stream_reset_eve
|
||||||
|
// 2. We sent our own reset (CLOSING); either they crossed on the
|
||||||
|
// wire, or this is a response to our Reset.
|
||||||
|
// Go to CLOSED
|
||||||
|
- // 3. We've sent a open but haven't gotten a response yet (OPENING)
|
||||||
|
+ // 3. We've sent a open but haven't gotten a response yet (CONNECTING)
|
||||||
|
// I believe this is impossible, as we don't have an input stream yet.
|
||||||
|
|
||||||
|
LOG(("Incoming: Channel %u closed, state %d",
|
||||||
|
channel->mStream, channel->mState));
|
||||||
|
ASSERT_WEBRTC(channel->mState == DataChannel::OPEN ||
|
||||||
|
channel->mState == DataChannel::CLOSING ||
|
||||||
|
+ channel->mState == DataChannel::CONNECTING ||
|
||||||
|
channel->mState == DataChannel::WAITING_TO_OPEN);
|
||||||
|
if (channel->mState == DataChannel::OPEN ||
|
||||||
|
channel->mState == DataChannel::WAITING_TO_OPEN) {
|
||||||
|
@@ -1791,20 +1812,21 @@ DataChannelConnection::HandleStreamChangeEvent(const struct sctp_stream_change_e
|
||||||
|
return;
|
||||||
|
} else {
|
||||||
|
if (strchg->strchange_instrms > mStreams.Length()) {
|
||||||
|
- LOG(("Other side increased streamds from %u to %u",
|
||||||
|
+ LOG(("Other side increased streams from %u to %u",
|
||||||
|
mStreams.Length(), strchg->strchange_instrms));
|
||||||
|
}
|
||||||
|
- if (strchg->strchange_outstrms > mStreams.Length()) {
|
||||||
|
+ if (strchg->strchange_outstrms > mStreams.Length() ||
|
||||||
|
+ strchg->strchange_instrms > mStreams.Length()) {
|
||||||
|
uint16_t old_len = mStreams.Length();
|
||||||
|
+ uint16_t new_len = std::max(strchg->strchange_outstrms,
|
||||||
|
+ strchg->strchange_instrms);
|
||||||
|
LOG(("Increasing number of streams from %u to %u - adding %u (in: %u)",
|
||||||
|
- old_len,
|
||||||
|
- strchg->strchange_outstrms,
|
||||||
|
- strchg->strchange_outstrms - old_len,
|
||||||
|
+ old_len, new_len, new_len - old_len,
|
||||||
|
strchg->strchange_instrms));
|
||||||
|
// make sure both are the same length
|
||||||
|
- mStreams.AppendElements(strchg->strchange_outstrms - old_len);
|
||||||
|
+ mStreams.AppendElements(new_len - old_len);
|
||||||
|
LOG(("New length = %d (was %d)", mStreams.Length(), old_len));
|
||||||
|
- for (uint32_t i = old_len; i < mStreams.Length(); ++i) {
|
||||||
|
+ for (size_t i = old_len; i < mStreams.Length(); ++i) {
|
||||||
|
mStreams[i] = nullptr;
|
||||||
|
}
|
||||||
|
// Re-process any channels waiting for streams.
|
||||||
|
@@ -1815,13 +1837,17 @@ DataChannelConnection::HandleStreamChangeEvent(const struct sctp_stream_change_e
|
||||||
|
// Could make a more complex API for OpenXxxFinish() and avoid this loop
|
||||||
|
int32_t num_needed = mPending.GetSize();
|
||||||
|
LOG(("%d of %d new streams already needed", num_needed,
|
||||||
|
- strchg->strchange_outstrms - old_len));
|
||||||
|
- num_needed -= (strchg->strchange_outstrms - old_len); // number we added
|
||||||
|
+ new_len - old_len));
|
||||||
|
+ num_needed -= (new_len - old_len); // number we added
|
||||||
|
if (num_needed > 0) {
|
||||||
|
if (num_needed < 16)
|
||||||
|
num_needed = 16;
|
||||||
|
LOG(("Not enough new streams, asking for %d more", num_needed));
|
||||||
|
RequestMoreStreams(num_needed);
|
||||||
|
+ } else if (strchg->strchange_outstrms < strchg->strchange_instrms) {
|
||||||
|
+ LOG(("Requesting %d output streams to match partner",
|
||||||
|
+ strchg->strchange_instrms - strchg->strchange_outstrms));
|
||||||
|
+ RequestMoreStreams(strchg->strchange_instrms - strchg->strchange_outstrms);
|
||||||
|
}
|
||||||
|
|
||||||
|
ProcessQueuedOpens();
|
||||||
|
Modified netwerk/sctp/datachannel/DataChannelProtocol.h
|
||||||
|
diff --git a/netwerk/sctp/datachannel/DataChannelProtocol.h b/netwerk/sctp/datachannel/DataChannelProtocol.h
|
||||||
|
index 549f74b..74fbe58 100644
|
||||||
|
--- a/netwerk/sctp/datachannel/DataChannelProtocol.h
|
||||||
|
+++ b/netwerk/sctp/datachannel/DataChannelProtocol.h
|
||||||
|
@@ -17,7 +17,7 @@
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Duplicated in fsm.def
|
||||||
|
-#define WEBRTC_DATACHANNEL_STREAMS_DEFAULT 16
|
||||||
|
+#define WEBRTC_DATACHANNEL_STREAMS_DEFAULT 256
|
||||||
|
|
||||||
|
#define DATA_CHANNEL_PPID_CONTROL 50
|
||||||
|
#define DATA_CHANNEL_PPID_BINARY 52
|
|
@ -0,0 +1,308 @@
|
||||||
|
commit d74bdb4589ad714e2a45e282974db075de2be673
|
||||||
|
Author: Randell Jesup <rjesup@jesup.org>
|
||||||
|
Date: Wed Nov 12 22:59:53 2014 -0500
|
||||||
|
|
||||||
|
Bug 1080312 - Update iteration code from upstream. r=jesup, a=abillings
|
||||||
|
|
||||||
|
Modified netwerk/sctp/src/moz.build
|
||||||
|
diff --git a/netwerk/sctp/src/moz.build b/netwerk/sctp/src/moz.build
|
||||||
|
index 1901a41..82103b9 100644
|
||||||
|
--- a/netwerk/sctp/src/moz.build
|
||||||
|
+++ b/netwerk/sctp/src/moz.build
|
||||||
|
@@ -31,7 +31,6 @@ SOURCES += [
|
||||||
|
'user_environment.c',
|
||||||
|
'user_mbuf.c',
|
||||||
|
'user_recv_thread.c',
|
||||||
|
- 'user_sctp_timer_iterate.c',
|
||||||
|
'user_socket.c',
|
||||||
|
]
|
||||||
|
|
||||||
|
Modified netwerk/sctp/src/netinet/sctp_callout.c
|
||||||
|
diff --git a/netwerk/sctp/src/netinet/sctp_callout.c b/netwerk/sctp/src/netinet/sctp_callout.c
|
||||||
|
index 67b7566..e8ac77f 100755
|
||||||
|
--- a/netwerk/sctp/src/netinet/sctp_callout.c
|
||||||
|
+++ b/netwerk/sctp/src/netinet/sctp_callout.c
|
||||||
|
@@ -30,9 +30,27 @@
|
||||||
|
* THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
*/
|
||||||
|
|
||||||
|
+#if defined(__Userspace__)
|
||||||
|
+#include <sys/types.h>
|
||||||
|
+#if !defined (__Userspace_os_Windows)
|
||||||
|
+#include <sys/wait.h>
|
||||||
|
+#include <unistd.h>
|
||||||
|
+#include <pthread.h>
|
||||||
|
+#endif
|
||||||
|
+#if defined(__Userspace_os_NaCl)
|
||||||
|
+#include <sys/select.h>
|
||||||
|
+#endif
|
||||||
|
+#include <stdlib.h>
|
||||||
|
+#include <string.h>
|
||||||
|
+#include <stdio.h>
|
||||||
|
+#include <errno.h>
|
||||||
|
+#include <netinet/sctp_sysctl.h>
|
||||||
|
+#include <netinet/sctp_pcb.h>
|
||||||
|
+#else
|
||||||
|
#include <netinet/sctp_os.h>
|
||||||
|
#include <netinet/sctp_callout.h>
|
||||||
|
#include <netinet/sctp_pcb.h>
|
||||||
|
+#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Callout/Timer routines for OS that doesn't have them
|
||||||
|
@@ -117,24 +135,16 @@ sctp_os_timer_stop(sctp_os_timer_t *c)
|
||||||
|
return (1);
|
||||||
|
}
|
||||||
|
|
||||||
|
-#if defined(__APPLE__)
|
||||||
|
-/*
|
||||||
|
- * For __APPLE__, use a single main timer at a faster resolution than
|
||||||
|
- * fastim. The timer just calls this existing callout infrastructure.
|
||||||
|
- */
|
||||||
|
-#endif
|
||||||
|
-void
|
||||||
|
-sctp_timeout(void *arg SCTP_UNUSED)
|
||||||
|
+static void
|
||||||
|
+sctp_handle_tick(int delta)
|
||||||
|
{
|
||||||
|
sctp_os_timer_t *c;
|
||||||
|
void (*c_func)(void *);
|
||||||
|
void *c_arg;
|
||||||
|
|
||||||
|
SCTP_TIMERQ_LOCK();
|
||||||
|
-#if defined(__APPLE__)
|
||||||
|
/* update our tick count */
|
||||||
|
- ticks += SCTP_BASE_VAR(sctp_main_timer_ticks);
|
||||||
|
-#endif
|
||||||
|
+ ticks += delta;
|
||||||
|
c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
|
||||||
|
while (c) {
|
||||||
|
if (c->c_time <= ticks) {
|
||||||
|
@@ -155,9 +165,60 @@ sctp_timeout(void *arg SCTP_UNUSED)
|
||||||
|
}
|
||||||
|
sctp_os_timer_next = NULL;
|
||||||
|
SCTP_TIMERQ_UNLOCK();
|
||||||
|
+}
|
||||||
|
|
||||||
|
#if defined(__APPLE__)
|
||||||
|
- /* restart the main timer */
|
||||||
|
+void
|
||||||
|
+sctp_timeout(void *arg SCTP_UNUSED)
|
||||||
|
+{
|
||||||
|
+ sctp_handle_tick(SCTP_BASE_VAR(sctp_main_timer_ticks));
|
||||||
|
sctp_start_main_timer();
|
||||||
|
+}
|
||||||
|
#endif
|
||||||
|
+
|
||||||
|
+#if defined(__Userspace__)
|
||||||
|
+#define TIMEOUT_INTERVAL 10
|
||||||
|
+
|
||||||
|
+void *
|
||||||
|
+user_sctp_timer_iterate(void *arg)
|
||||||
|
+{
|
||||||
|
+ for (;;) {
|
||||||
|
+#if defined (__Userspace_os_Windows)
|
||||||
|
+ Sleep(TIMEOUT_INTERVAL);
|
||||||
|
+#else
|
||||||
|
+ struct timeval timeout;
|
||||||
|
+
|
||||||
|
+ timeout.tv_sec = 0;
|
||||||
|
+ timeout.tv_usec = 1000 * TIMEOUT_INTERVAL;
|
||||||
|
+ select(0, NULL, NULL, NULL, &timeout);
|
||||||
|
+#endif
|
||||||
|
+ if (SCTP_BASE_VAR(timer_thread_should_exit)) {
|
||||||
|
+ break;
|
||||||
|
+ }
|
||||||
|
+ sctp_handle_tick(MSEC_TO_TICKS(TIMEOUT_INTERVAL));
|
||||||
|
+ }
|
||||||
|
+ return (NULL);
|
||||||
|
}
|
||||||
|
+
|
||||||
|
+void
|
||||||
|
+sctp_start_timer(void)
|
||||||
|
+{
|
||||||
|
+ /*
|
||||||
|
+ * No need to do SCTP_TIMERQ_LOCK_INIT();
|
||||||
|
+ * here, it is being done in sctp_pcb_init()
|
||||||
|
+ */
|
||||||
|
+#if defined (__Userspace_os_Windows)
|
||||||
|
+ if ((SCTP_BASE_VAR(timer_thread) = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)user_sctp_timer_iterate, NULL, 0, NULL)) == NULL) {
|
||||||
|
+ SCTP_PRINTF("ERROR; Creating ithread failed\n");
|
||||||
|
+ }
|
||||||
|
+#else
|
||||||
|
+ int rc;
|
||||||
|
+
|
||||||
|
+ rc = pthread_create(&SCTP_BASE_VAR(timer_thread), NULL, user_sctp_timer_iterate, NULL);
|
||||||
|
+ if (rc) {
|
||||||
|
+ SCTP_PRINTF("ERROR; return code from pthread_create() is %d\n", rc);
|
||||||
|
+ }
|
||||||
|
+#endif
|
||||||
|
+}
|
||||||
|
+
|
||||||
|
+#endif
|
||||||
|
Modified netwerk/sctp/src/netinet/sctp_callout.h
|
||||||
|
diff --git a/netwerk/sctp/src/netinet/sctp_callout.h b/netwerk/sctp/src/netinet/sctp_callout.h
|
||||||
|
index 2782945..c53c5a4 100755
|
||||||
|
--- a/netwerk/sctp/src/netinet/sctp_callout.h
|
||||||
|
+++ b/netwerk/sctp/src/netinet/sctp_callout.h
|
||||||
|
@@ -64,7 +64,6 @@ __FBSDID("$FreeBSD$");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
extern int ticks;
|
||||||
|
-extern void sctp_start_timer();
|
||||||
|
#endif
|
||||||
|
|
||||||
|
TAILQ_HEAD(calloutlist, sctp_callout);
|
||||||
|
@@ -94,6 +93,11 @@ int sctp_os_timer_stop(sctp_os_timer_t *);
|
||||||
|
#define SCTP_OS_TIMER_ACTIVE(tmr) ((tmr)->c_flags & SCTP_CALLOUT_ACTIVE)
|
||||||
|
#define SCTP_OS_TIMER_DEACTIVATE(tmr) ((tmr)->c_flags &= ~SCTP_CALLOUT_ACTIVE)
|
||||||
|
|
||||||
|
+#if defined(__Userspace__)
|
||||||
|
+void sctp_start_timer(void);
|
||||||
|
+#endif
|
||||||
|
+#if defined(__APPLE__)
|
||||||
|
void sctp_timeout(void *);
|
||||||
|
+#endif
|
||||||
|
|
||||||
|
#endif
|
||||||
|
Modified netwerk/sctp/src/netinet/sctp_usrreq.c
|
||||||
|
diff --git a/netwerk/sctp/src/netinet/sctp_usrreq.c b/netwerk/sctp/src/netinet/sctp_usrreq.c
|
||||||
|
index d4115ad..c17ea04 100755
|
||||||
|
--- a/netwerk/sctp/src/netinet/sctp_usrreq.c
|
||||||
|
+++ b/netwerk/sctp/src/netinet/sctp_usrreq.c
|
||||||
|
@@ -56,6 +56,9 @@ __FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 259943 2013-12-27 13:07:00Z t
|
||||||
|
#include <netinet/sctp_timer.h>
|
||||||
|
#include <netinet/sctp_auth.h>
|
||||||
#include <netinet/sctp_bsd_addr.h>
+#if defined(__Userspace__)
+#include <netinet/sctp_callout.h>
+#endif
#if !defined(__Userspace_os_Windows)
#include <netinet/udp.h>
#endif

Deleted netwerk/sctp/src/user_sctp_timer_iterate.c
diff --git a/netwerk/sctp/src/user_sctp_timer_iterate.c b/netwerk/sctp/src/user_sctp_timer_iterate.c
deleted file mode 100755
index 0a9dbce..0000000
--- a/netwerk/sctp/src/user_sctp_timer_iterate.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*-
- * Copyright (c) 2012 Michael Tuexen
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- */
-
-#include <sys/types.h>
-#if !defined (__Userspace_os_Windows)
-#include <sys/wait.h>
-#include <unistd.h>
-#include <pthread.h>
-#endif
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <errno.h>
-#include <netinet/sctp_pcb.h>
-#include <netinet/sctp_sysctl.h>
-#include "netinet/sctp_callout.h"
-
-/* This is the polling time of callqueue in milliseconds
- * 10ms seems to work well. 1ms was giving erratic behavior
- */
-#define TIMEOUT_INTERVAL 10
-
-extern int ticks;
-
-void *
-user_sctp_timer_iterate(void *arg)
-{
- sctp_os_timer_t *c;
- void (*c_func)(void *);
- void *c_arg;
- sctp_os_timer_t *sctp_os_timer_next;
- /*
- * The MSEC_TO_TICKS conversion depends on hz. The to_ticks in
- * sctp_os_timer_start also depends on hz. E.g. if hz=1000 then
- * for multiple INIT the to_ticks is 2000, 4000, 8000, 16000, 32000, 60000
- * and further to_ticks level off at 60000 i.e. 60 seconds.
- * If hz=100 then for multiple INIT the to_ticks are 200, 400, 800 and so-on.
- */
- for (;;) {
-#if defined (__Userspace_os_Windows)
- Sleep(TIMEOUT_INTERVAL);
-#else
- struct timeval timeout;
-
- timeout.tv_sec = 0;
- timeout.tv_usec = 1000 * TIMEOUT_INTERVAL;
- select(0, NULL, NULL, NULL, &timeout);
-#endif
- if (SCTP_BASE_VAR(timer_thread_should_exit)) {
- break;
- }
- SCTP_TIMERQ_LOCK();
- /* update our tick count */
- ticks += MSEC_TO_TICKS(TIMEOUT_INTERVAL);
- c = TAILQ_FIRST(&SCTP_BASE_INFO(callqueue));
- while (c) {
- if (c->c_time <= ticks) {
- sctp_os_timer_next = TAILQ_NEXT(c, tqe);
- TAILQ_REMOVE(&SCTP_BASE_INFO(callqueue), c, tqe);
- c_func = c->c_func;
- c_arg = c->c_arg;
- c->c_flags &= ~SCTP_CALLOUT_PENDING;
- SCTP_TIMERQ_UNLOCK();
- c_func(c_arg);
- SCTP_TIMERQ_LOCK();
- c = sctp_os_timer_next;
- } else {
- c = TAILQ_NEXT(c, tqe);
- }
- }
- SCTP_TIMERQ_UNLOCK();
- }
- return (NULL);
-}
-
-void
-sctp_start_timer(void)
-{
- /*
- * No need to do SCTP_TIMERQ_LOCK_INIT();
- * here, it is being done in sctp_pcb_init()
- */
-#if defined (__Userspace_os_Windows)
- if ((SCTP_BASE_VAR(timer_thread) = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)user_sctp_timer_iterate, NULL, 0, NULL)) == NULL) {
- SCTP_PRINTF("ERROR; Creating ithread failed\n");
- }
-#else
- int rc;
-
- rc = pthread_create(&SCTP_BASE_VAR(timer_thread), NULL, user_sctp_timer_iterate, NULL);
- if (rc) {
- SCTP_PRINTF("ERROR; return code from pthread_create() is %d\n", rc);
- }
-#endif
-}
@ -0,0 +1,119 @@
commit 9df10fea93b483af6646ef2f7aab35598fbaab2f
Author: Nils Ohlmeier [:drno] <drno@ohlmeier.org>
Date: Thu Nov 6 12:21:57 2014 -0500

Bug 1089207: fix parsing of invalid fmtp att r=drno,jesup a=lmandel

Modified media/webrtc/signaling/src/sipcc/core/sdp/sdp_attr.c
diff --git a/media/webrtc/signaling/src/sipcc/core/sdp/sdp_attr.c b/media/webrtc/signaling/src/sipcc/core/sdp/sdp_attr.c
index fa5ca2e..33d26c0 100644
--- a/media/webrtc/signaling/src/sipcc/core/sdp/sdp_attr.c
+++ b/media/webrtc/signaling/src/sipcc/core/sdp/sdp_attr.c
@@ -458,7 +458,6 @@ sdp_result_e sdp_parse_attr_fmtp (sdp_t *sdp_p, sdp_attr_t *attr_p,
char tmp[SDP_MAX_STRING_LEN];
char *src_ptr;
char *temp_ptr = NULL;
- tinybool flag=FALSE;
char *tok=NULL;
char *temp=NULL;
u16 custom_x=0;
@@ -495,29 +494,11 @@ sdp_result_e sdp_parse_attr_fmtp (sdp_t *sdp_p, sdp_attr_t *attr_p,
fmtp_p->packetization_mode = 0;
fmtp_p->level_asymmetry_allowed = SDP_DEFAULT_LEVEL_ASYMMETRY_ALLOWED_VALUE;

- /* BEGIN - a typical macro fn to replace '/' with ';' from fmtp line*/
- /* This ugly replacement of '/' with ';' is only done because
- * econf/MS client sends in this wierd /illegal format.
- * fmtp parameters MUST be separated by ';'
- */
temp_ptr = cpr_strdup(ptr);
if (temp_ptr == NULL) {
return (SDP_FAILURE);
}
fmtp_ptr = src_ptr = temp_ptr;
- while (flag == FALSE) {
- if (*src_ptr == '\n') {
- flag = TRUE;
- break;
- }
- if (*src_ptr == '/') {
- *src_ptr =';' ;
- }
- src_ptr++;
- }
- /* END */
- /* Once we move to RFC compliant video codec implementations, the above
- * patch should be removed */

src_ptr = temp_ptr;
while (!done) {
Modified media/webrtc/signaling/src/sipcc/core/sdp/sdp_main.c
diff --git a/media/webrtc/signaling/src/sipcc/core/sdp/sdp_main.c b/media/webrtc/signaling/src/sipcc/core/sdp/sdp_main.c
index 0be02aa..9760d4e 100644
--- a/media/webrtc/signaling/src/sipcc/core/sdp/sdp_main.c
+++ b/media/webrtc/signaling/src/sipcc/core/sdp/sdp_main.c
@@ -1002,7 +1002,12 @@ sdp_result_e sdp_parse (sdp_t *sdp_p, char **bufp, u16 len)
*/
ptr = next_ptr;
line_end = sdp_findchar(ptr, "\n");
- if (line_end >= (*bufp + len)) {
+ if ((line_end >= (*bufp + len)) ||
+ (*line_end == '\0')) {
+ /* As this does not update the result value the SDP up to this point
+ * is still accept as valid. So encountering this is not treated as
+ * an error.
+ */
sdp_parse_error(sdp_p->peerconnection,
"%s End of line beyond end of buffer.",
sdp_p->debug_str);
Modified media/webrtc/signaling/test/sdp_unittests.cpp
diff --git a/media/webrtc/signaling/test/sdp_unittests.cpp b/media/webrtc/signaling/test/sdp_unittests.cpp
index 51df09b..9f98eed 100644
--- a/media/webrtc/signaling/test/sdp_unittests.cpp
+++ b/media/webrtc/signaling/test/sdp_unittests.cpp
@@ -755,13 +755,13 @@ TEST_F(SdpTest, parseFmtpMaxFs) {
u32 val = 0;
ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=300;max-fr=30\r\n");
ASSERT_EQ(sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, &val), SDP_SUCCESS);
- ASSERT_EQ(val, 300);
+ ASSERT_EQ(val, 300U);
}
TEST_F(SdpTest, parseFmtpMaxFr) {
u32 val = 0;
ParseSdp(kVideoSdp + "a=fmtp:120 max-fs=300;max-fr=30\r\n");
ASSERT_EQ(sdp_attr_get_fmtp_max_fr(sdp_ptr_, 1, 0, 1, &val), SDP_SUCCESS);
- ASSERT_EQ(val, 30);
+ ASSERT_EQ(val, 30U);
}

TEST_F(SdpTest, addFmtpMaxFs) {
@@ -789,6 +789,29 @@ TEST_F(SdpTest, addFmtpMaxFsFr) {
std::string::npos);
}

+static const std::string kBrokenFmtp =
+ "v=0\r\n"
+ "o=- 137331303 2 IN IP4 127.0.0.1\r\n"
+ "s=SIP Call\r\n"
+ "t=0 0\r\n"
+ "m=video 56436 RTP/SAVPF 120\r\n"
+ "c=IN IP4 198.51.100.7\r\n"
+ "a=rtpmap:120 VP8/90000\r\n"
+ /* Note: the \0 in this string triggered bz://1089207
+ */
+ "a=fmtp:120 max-fs=300;max\0fr=30";
+
+TEST_F(SdpTest, parseBrokenFmtp) {
+ u32 val = 0;
+ char *buf = const_cast<char *>(kBrokenFmtp.data());
+ ResetSdp();
+ /* We need to manually invoke the parser here to be able to specify the length
+ * of the string beyond the \0 in last line of the string.
+ */
+ ASSERT_EQ(sdp_parse(sdp_ptr_, &buf, 165), SDP_SUCCESS);
+ ASSERT_EQ(sdp_attr_get_fmtp_max_fs(sdp_ptr_, 1, 0, 1, &val), SDP_INVALID_PARAMETER);
+}
+
} // End namespace test.

int main(int argc, char **argv) {
@ -0,0 +1,33 @@
commit 50c5ca4bacf7cda77c3a7ab1b8d82ded18fb3355
Author: Olli Pettay <Olli.Pettay@helsinki.fi>
Date: Sun Nov 2 22:01:55 2014 +0200

Bug 1087633 - Filter out XPConnect wrapped input streams. r=bz, a=lmandel

Modified content/base/src/nsXMLHttpRequest.h
diff --git a/content/base/src/nsXMLHttpRequest.h b/content/base/src/nsXMLHttpRequest.h
index b1fc4e3..4ab4f29 100644
--- a/content/base/src/nsXMLHttpRequest.h
+++ b/content/base/src/nsXMLHttpRequest.h
@@ -28,7 +28,8 @@
#include "nsIPrincipal.h"
#include "nsIScriptObjectPrincipal.h"
#include "nsISizeOfEventTarget.h"
-
+#include "nsIXPConnect.h"
+#include "nsIInputStream.h"
#include "mozilla/Assertions.h"
#include "mozilla/DOMEventTargetHelper.h"
#include "mozilla/MemoryReporting.h"
@@ -446,6 +447,11 @@ public:
void Send(nsIInputStream* aStream, ErrorResult& aRv)
{
NS_ASSERTION(aStream, "Null should go to string version");
+ nsCOMPtr<nsIXPConnectWrappedJS> wjs = do_QueryInterface(aStream);
+ if (wjs) {
+ aRv.Throw(NS_ERROR_DOM_TYPE_ERR);
+ return;
+ }
aRv = Send(RequestBody(aStream));
}
void SendAsBinary(const nsAString& aBody, ErrorResult& aRv);
@ -0,0 +1,400 @@
|
||||||
|
commit 7efadbb03cdffa11ebfc2da3113377d2f33b893b
|
||||||
|
Author: Henri Sivonen <hsivonen@hsivonen.fi>
|
||||||
|
Date: Mon Nov 3 15:23:26 2014 +0200
|
||||||
|
|
||||||
|
Bug 1088635. r=smaug, a=bkerensa
|
||||||
|
|
||||||
|
Modified content/base/src/nsDocument.cpp
|
||||||
|
diff --git a/content/base/src/nsDocument.cpp b/content/base/src/nsDocument.cpp
|
||||||
|
index cbed38d..3493bce 100644
|
||||||
|
--- a/content/base/src/nsDocument.cpp
|
||||||
|
+++ b/content/base/src/nsDocument.cpp
|
||||||
|
@@ -3916,7 +3916,7 @@ nsDocument::InsertChildAt(nsIContent* aKid, uint32_t aIndex,
|
||||||
|
bool aNotify)
|
||||||
|
{
|
||||||
|
if (aKid->IsElement() && GetRootElement()) {
|
||||||
|
- NS_ERROR("Inserting element child when we already have one");
|
||||||
|
+ NS_WARNING("Inserting root element when we already have one");
|
||||||
|
return NS_ERROR_DOM_HIERARCHY_REQUEST_ERR;
|
||||||
|
}
|
||||||
|
|
||||||
|
Modified parser/html/nsHtml5Parser.cpp
|
||||||
|
diff --git a/parser/html/nsHtml5Parser.cpp b/parser/html/nsHtml5Parser.cpp
|
||||||
|
index a485be4..f28adb4 100644
|
||||||
|
--- a/parser/html/nsHtml5Parser.cpp
|
||||||
|
+++ b/parser/html/nsHtml5Parser.cpp
|
||||||
|
@@ -237,7 +237,8 @@ nsHtml5Parser::Parse(const nsAString& aSourceBuffer,
|
||||||
|
* WillBuildModel to be called before the document has had its
|
||||||
|
* script global object set.
|
||||||
|
*/
|
||||||
|
- mExecutor->WillBuildModel(eDTDMode_unknown);
|
||||||
|
+ rv = mExecutor->WillBuildModel(eDTDMode_unknown);
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return early if the parser has processed EOF
|
||||||
|
@@ -255,7 +256,7 @@ nsHtml5Parser::Parse(const nsAString& aSourceBuffer,
|
||||||
|
}
|
||||||
|
mDocumentClosed = true;
|
||||||
|
if (!mBlocked && !mInDocumentWrite) {
|
||||||
|
- ParseUntilBlocked();
|
||||||
|
+ return ParseUntilBlocked();
|
||||||
|
}
|
||||||
|
return NS_OK;
|
||||||
|
}
|
||||||
|
@@ -378,7 +379,8 @@ nsHtml5Parser::Parse(const nsAString& aSourceBuffer,
|
||||||
|
|
||||||
|
if (mTreeBuilder->HasScript()) {
|
||||||
|
mTreeBuilder->Flush(); // Move ops to the executor
|
||||||
|
- mExecutor->FlushDocumentWrite(); // run the ops
|
||||||
|
+ rv = mExecutor->FlushDocumentWrite(); // run the ops
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
// Flushing tree ops can cause all sorts of things.
|
||||||
|
// Return early if the parser got terminated.
|
||||||
|
if (mExecutor->IsComplete()) {
|
||||||
|
@@ -437,7 +439,8 @@ nsHtml5Parser::Parse(const nsAString& aSourceBuffer,
|
||||||
|
"Buffer wasn't tokenized to completion?");
|
||||||
|
// Scripting semantics require a forced tree builder flush here
|
||||||
|
mTreeBuilder->Flush(); // Move ops to the executor
|
||||||
|
- mExecutor->FlushDocumentWrite(); // run the ops
|
||||||
|
+ rv = mExecutor->FlushDocumentWrite(); // run the ops
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
} else if (stackBuffer.hasMore()) {
|
||||||
|
// The buffer wasn't tokenized to completion. Tokenize the untokenized
|
||||||
|
// content in order to preload stuff. This content will be retokenized
|
||||||
|
@@ -594,11 +597,13 @@ nsHtml5Parser::IsScriptCreated()
|
||||||
|
/* End nsIParser */
|
||||||
|
|
||||||
|
// not from interface
|
||||||
|
-void
|
||||||
|
+nsresult
|
||||||
|
nsHtml5Parser::ParseUntilBlocked()
|
||||||
|
{
|
||||||
|
- if (mBlocked || mExecutor->IsComplete() || NS_FAILED(mExecutor->IsBroken())) {
|
||||||
|
- return;
|
||||||
|
+ nsresult rv = mExecutor->IsBroken();
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
+ if (mBlocked || mExecutor->IsComplete()) {
|
||||||
|
+ return NS_OK;
|
||||||
|
}
|
||||||
|
NS_ASSERTION(mExecutor->HasStarted(), "Bad life cycle.");
|
||||||
|
NS_ASSERTION(!mInDocumentWrite,
|
||||||
|
@@ -611,7 +616,7 @@ nsHtml5Parser::ParseUntilBlocked()
|
||||||
|
if (mFirstBuffer == mLastBuffer) {
|
||||||
|
if (mExecutor->IsComplete()) {
|
||||||
|
// something like cache manisfests stopped the parse in mid-flight
|
||||||
|
- return;
|
||||||
|
+ return NS_OK;
|
||||||
|
}
|
||||||
|
if (mDocumentClosed) {
|
||||||
|
NS_ASSERTION(!GetStreamParser(),
|
||||||
|
@@ -620,8 +625,10 @@ nsHtml5Parser::ParseUntilBlocked()
|
||||||
|
mTreeBuilder->StreamEnded();
|
||||||
|
mTreeBuilder->Flush();
|
||||||
|
mExecutor->FlushDocumentWrite();
|
||||||
|
+ // The below call does memory cleanup, so call it even if the
|
||||||
|
+ // parser has been marked as broken.
|
||||||
|
mTokenizer->end();
|
||||||
|
- return;
|
||||||
|
+ return NS_OK;
|
||||||
|
}
|
||||||
|
// never release the last buffer.
|
||||||
|
NS_ASSERTION(!mLastBuffer->getStart() && !mLastBuffer->getEnd(),
|
||||||
|
@@ -643,14 +650,14 @@ nsHtml5Parser::ParseUntilBlocked()
|
||||||
|
NS_ASSERTION(mExecutor->IsInFlushLoop(),
|
||||||
|
"How did we come here without being in the flush loop?");
|
||||||
|
}
|
||||||
|
- return; // no more data for now but expecting more
|
||||||
|
+ return NS_OK; // no more data for now but expecting more
|
||||||
|
}
|
||||||
|
mFirstBuffer = mFirstBuffer->next;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mBlocked || mExecutor->IsComplete()) {
|
||||||
|
- return;
|
||||||
|
+ return NS_OK;
|
||||||
|
}
|
||||||
|
|
||||||
|
// now we have a non-empty buffer
|
||||||
|
@@ -667,10 +674,11 @@ nsHtml5Parser::ParseUntilBlocked()
|
||||||
|
}
|
||||||
|
if (mTreeBuilder->HasScript()) {
|
||||||
|
mTreeBuilder->Flush();
|
||||||
|
- mExecutor->FlushDocumentWrite();
|
||||||
|
+ nsresult rv = mExecutor->FlushDocumentWrite();
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
}
|
||||||
|
if (mBlocked) {
|
||||||
|
- return;
|
||||||
|
+ return NS_OK;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
Modified parser/html/nsHtml5Parser.h
|
||||||
|
diff --git a/parser/html/nsHtml5Parser.h b/parser/html/nsHtml5Parser.h
|
||||||
|
index aff79c7..e2ef2f8 100644
|
||||||
|
--- a/parser/html/nsHtml5Parser.h
|
||||||
|
+++ b/parser/html/nsHtml5Parser.h
|
||||||
|
@@ -262,7 +262,7 @@ class nsHtml5Parser : public nsIParser,
|
||||||
|
/**
|
||||||
|
* Parse until pending data is exhausted or a script blocks the parser
|
||||||
|
*/
|
||||||
|
- void ParseUntilBlocked();
|
||||||
|
+ nsresult ParseUntilBlocked();
|
||||||
|
|
||||||
|
private:
|
||||||
|
|
||||||
|
Modified parser/html/nsHtml5StreamParser.cpp
|
||||||
|
diff --git a/parser/html/nsHtml5StreamParser.cpp b/parser/html/nsHtml5StreamParser.cpp
|
||||||
|
index 4790568..7e3917b 100644
|
||||||
|
--- a/parser/html/nsHtml5StreamParser.cpp
|
||||||
|
+++ b/parser/html/nsHtml5StreamParser.cpp
|
||||||
|
@@ -796,7 +796,7 @@ nsHtml5StreamParser::WriteStreamBytes(const uint8_t* aFromSegment,
|
||||||
|
// NS_HTML5_STREAM_PARSER_READ_BUFFER_SIZE.
|
||||||
|
if (!mLastBuffer) {
|
||||||
|
NS_WARNING("mLastBuffer should not be null!");
|
||||||
|
- MarkAsBroken();
|
||||||
|
+ MarkAsBroken(NS_ERROR_NULL_POINTER);
|
||||||
|
return NS_ERROR_NULL_POINTER;
|
||||||
|
}
|
||||||
|
if (mLastBuffer->getEnd() == NS_HTML5_STREAM_PARSER_READ_BUFFER_SIZE) {
|
||||||
|
@@ -902,7 +902,8 @@ nsHtml5StreamParser::OnStartRequest(nsIRequest* aRequest, nsISupports* aContext)
|
||||||
|
* WillBuildModel to be called before the document has had its
|
||||||
|
* script global object set.
|
||||||
|
*/
|
||||||
|
- mExecutor->WillBuildModel(eDTDMode_unknown);
|
||||||
|
+ rv = mExecutor->WillBuildModel(eDTDMode_unknown);
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
|
||||||
|
nsRefPtr<nsHtml5OwningUTF16Buffer> newBuf =
|
||||||
|
nsHtml5OwningUTF16Buffer::FalliblyCreate(
|
||||||
|
@@ -1003,8 +1004,9 @@ nsHtml5StreamParser::DoStopRequest()
|
||||||
|
|
||||||
|
if (!mUnicodeDecoder) {
|
||||||
|
uint32_t writeCount;
|
||||||
|
- if (NS_FAILED(FinalizeSniffing(nullptr, 0, &writeCount, 0))) {
|
||||||
|
- MarkAsBroken();
|
||||||
|
+ nsresult rv;
|
||||||
|
+ if (NS_FAILED(rv = FinalizeSniffing(nullptr, 0, &writeCount, 0))) {
|
||||||
|
+ MarkAsBroken(rv);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if (mFeedChardet) {
|
||||||
|
@@ -1076,7 +1078,7 @@ nsHtml5StreamParser::DoDataAvailable(const uint8_t* aBuffer, uint32_t aLength)
|
||||||
|
rv = SniffStreamBytes(aBuffer, aLength, &writeCount);
|
||||||
|
}
|
||||||
|
if (NS_FAILED(rv)) {
|
||||||
|
- MarkAsBroken();
|
||||||
|
+ MarkAsBroken(rv);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
NS_ASSERTION(writeCount == aLength, "Wrong number of stream bytes written/sniffed.");
|
||||||
|
@@ -1662,13 +1664,13 @@ nsHtml5StreamParser::TimerFlush()
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
-nsHtml5StreamParser::MarkAsBroken()
|
||||||
|
+nsHtml5StreamParser::MarkAsBroken(nsresult aRv)
|
||||||
|
{
|
||||||
|
NS_ASSERTION(IsParserThread(), "Wrong thread!");
|
||||||
|
mTokenizerMutex.AssertCurrentThreadOwns();
|
||||||
|
|
||||||
|
Terminate();
|
||||||
|
- mTreeBuilder->MarkAsBroken();
|
||||||
|
+ mTreeBuilder->MarkAsBroken(aRv);
|
||||||
|
mozilla::DebugOnly<bool> hadOps = mTreeBuilder->Flush(false);
|
||||||
|
NS_ASSERTION(hadOps, "Should have had the markAsBroken op!");
|
||||||
|
if (NS_FAILED(NS_DispatchToMainThread(mExecutorFlusher))) {
|
||||||
|
Modified parser/html/nsHtml5StreamParser.h
|
||||||
|
diff --git a/parser/html/nsHtml5StreamParser.h b/parser/html/nsHtml5StreamParser.h
|
||||||
|
index c7dcbbe..476ef16 100644
|
||||||
|
--- a/parser/html/nsHtml5StreamParser.h
|
||||||
|
+++ b/parser/html/nsHtml5StreamParser.h
|
||||||
|
@@ -218,7 +218,7 @@ class nsHtml5StreamParser : public nsICharsetDetectionObserver {
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
- void MarkAsBroken();
|
||||||
|
+ void MarkAsBroken(nsresult aRv);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Marks the stream parser as interrupted. If you ever add calls to this
|
||||||
|
Modified parser/html/nsHtml5TreeBuilderCppSupplement.h
|
||||||
|
diff --git a/parser/html/nsHtml5TreeBuilderCppSupplement.h b/parser/html/nsHtml5TreeBuilderCppSupplement.h
|
||||||
|
index 4cd5c7c..1e65394 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeBuilderCppSupplement.h
|
||||||
|
+++ b/parser/html/nsHtml5TreeBuilderCppSupplement.h
|
||||||
|
@@ -949,14 +949,14 @@ nsHtml5TreeBuilder::DropHandles()
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
-nsHtml5TreeBuilder::MarkAsBroken()
|
||||||
|
+nsHtml5TreeBuilder::MarkAsBroken(nsresult aRv)
|
||||||
|
{
|
||||||
|
if (MOZ_UNLIKELY(mBuilder)) {
|
||||||
|
MOZ_ASSUME_UNREACHABLE("Must not call this with builder.");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
mOpQueue.Clear(); // Previous ops don't matter anymore
|
||||||
|
- mOpQueue.AppendElement()->Init(eTreeOpMarkAsBroken);
|
||||||
|
+ mOpQueue.AppendElement()->Init(aRv);
|
||||||
|
}
|
||||||
|
|
||||||
|
void
|
||||||
|
Modified parser/html/nsHtml5TreeBuilderHSupplement.h
|
||||||
|
diff --git a/parser/html/nsHtml5TreeBuilderHSupplement.h b/parser/html/nsHtml5TreeBuilderHSupplement.h
|
||||||
|
index a321e80..8d380eb 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeBuilderHSupplement.h
|
||||||
|
+++ b/parser/html/nsHtml5TreeBuilderHSupplement.h
|
||||||
|
@@ -223,4 +223,4 @@
|
||||||
|
|
||||||
|
void errEndWithUnclosedElements(nsIAtom* aName);
|
||||||
|
|
||||||
|
- void MarkAsBroken();
|
||||||
|
+ void MarkAsBroken(nsresult aRv);
|
||||||
|
Modified parser/html/nsHtml5TreeOpExecutor.cpp
|
||||||
|
diff --git a/parser/html/nsHtml5TreeOpExecutor.cpp b/parser/html/nsHtml5TreeOpExecutor.cpp
|
||||||
|
index ebcafca..6c52e5f 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeOpExecutor.cpp
|
||||||
|
+++ b/parser/html/nsHtml5TreeOpExecutor.cpp
|
||||||
|
@@ -411,7 +411,11 @@ nsHtml5TreeOpExecutor::RunFlushLoop()
|
||||||
|
GetParser()->GetStreamParser();
|
||||||
|
// Now parse content left in the document.write() buffer queue if any.
|
||||||
|
// This may generate tree ops on its own or dequeue a speculation.
|
||||||
|
- GetParser()->ParseUntilBlocked();
|
||||||
|
+ nsresult rv = GetParser()->ParseUntilBlocked();
|
||||||
|
+ if (NS_FAILED(rv)) {
|
||||||
|
+ MarkAsBroken(rv);
|
||||||
|
+ return;
|
||||||
|
+ }
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mOpQueue.IsEmpty()) {
|
||||||
|
@@ -496,21 +500,24 @@ nsHtml5TreeOpExecutor::RunFlushLoop()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
-void
|
||||||
|
+nsresult
|
||||||
|
nsHtml5TreeOpExecutor::FlushDocumentWrite()
|
||||||
|
{
|
||||||
|
+ nsresult rv = IsBroken();
|
||||||
|
+ NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
+
|
||||||
|
FlushSpeculativeLoads(); // Make sure speculative loads never start after the
|
||||||
|
// corresponding normal loads for the same URLs.
|
||||||
|
|
||||||
|
if (MOZ_UNLIKELY(!mParser)) {
|
||||||
|
// The parse has ended.
|
||||||
|
mOpQueue.Clear(); // clear in order to be able to assert in destructor
|
||||||
|
- return;
|
||||||
|
+ return rv;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (mFlushState != eNotFlushing) {
|
||||||
|
// XXX Can this happen? In case it can, let's avoid crashing.
|
||||||
|
- return;
|
||||||
|
+ return rv;
|
||||||
|
}
|
||||||
|
|
||||||
|
mFlushState = eInFlush;
|
||||||
|
@@ -545,7 +552,7 @@ nsHtml5TreeOpExecutor::FlushDocumentWrite()
|
||||||
|
}
|
||||||
|
NS_ASSERTION(mFlushState == eInDocUpdate,
|
||||||
|
"Tried to perform tree op outside update batch.");
|
||||||
|
- nsresult rv = iter->Perform(this, &scriptElement);
|
||||||
|
+ rv = iter->Perform(this, &scriptElement);
|
||||||
|
if (NS_FAILED(rv)) {
|
||||||
|
MarkAsBroken(rv);
|
||||||
|
break;
|
||||||
|
@@ -560,13 +567,14 @@ nsHtml5TreeOpExecutor::FlushDocumentWrite()
|
||||||
|
|
||||||
|
if (MOZ_UNLIKELY(!mParser)) {
|
||||||
|
// Ending the doc update caused a call to nsIParser::Terminate().
|
||||||
|
- return;
|
||||||
|
+ return rv;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (scriptElement) {
|
||||||
|
// must be tail call when mFlushState is eNotFlushing
|
||||||
|
RunScript(scriptElement);
|
||||||
|
}
|
||||||
|
+ return rv;
|
||||||
|
}
|
||||||
|
|
||||||
|
// copied from HTML content sink
|
||||||
|
Modified parser/html/nsHtml5TreeOpExecutor.h
|
||||||
|
diff --git a/parser/html/nsHtml5TreeOpExecutor.h b/parser/html/nsHtml5TreeOpExecutor.h
|
||||||
|
index 9617dcb..1f81448 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeOpExecutor.h
|
||||||
|
+++ b/parser/html/nsHtml5TreeOpExecutor.h
|
||||||
|
@@ -173,7 +173,7 @@ class nsHtml5TreeOpExecutor : public nsHtml5DocumentBuilder,
|
||||||
|
|
||||||
|
void RunFlushLoop();
|
||||||
|
|
||||||
|
- void FlushDocumentWrite();
|
||||||
|
+ nsresult FlushDocumentWrite();
|
||||||
|
|
||||||
|
void MaybeSuspend();
|
||||||
|
|
||||||
|
Modified parser/html/nsHtml5TreeOperation.cpp
|
||||||
|
diff --git a/parser/html/nsHtml5TreeOperation.cpp b/parser/html/nsHtml5TreeOperation.cpp
|
||||||
|
index 48b71dc..7ad65247 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeOperation.cpp
|
||||||
|
+++ b/parser/html/nsHtml5TreeOperation.cpp
|
||||||
|
@@ -214,6 +214,9 @@ nsHtml5TreeOperation::AppendToDocument(nsIContent* aNode,
|
||||||
|
nsIDocument* doc = aBuilder->GetDocument();
|
||||||
|
uint32_t childCount = doc->GetChildCount();
|
||||||
|
rv = doc->AppendChildTo(aNode, false);
|
||||||
|
+ if (rv == NS_ERROR_DOM_HIERARCHY_REQUEST_ERR) {
|
||||||
|
+ return NS_OK;
|
||||||
|
+ }
|
||||||
|
NS_ENSURE_SUCCESS(rv, rv);
|
||||||
|
nsNodeUtils::ContentInserted(doc, aNode, childCount);
|
||||||
|
|
||||||
|
@@ -739,8 +742,7 @@ nsHtml5TreeOperation::Perform(nsHtml5TreeOpExecutor* aBuilder,
|
||||||
|
return NS_OK;
|
||||||
|
}
|
||||||
|
case eTreeOpMarkAsBroken: {
|
||||||
|
- aBuilder->MarkAsBroken(NS_ERROR_OUT_OF_MEMORY);
|
||||||
|
- return NS_OK;
|
||||||
|
+ return mOne.result;
|
||||||
|
}
|
||||||
|
case eTreeOpRunScript: {
|
||||||
|
nsIContent* node = *(mOne.node);
|
||||||
|
Modified parser/html/nsHtml5TreeOperation.h
|
||||||
|
diff --git a/parser/html/nsHtml5TreeOperation.h b/parser/html/nsHtml5TreeOperation.h
|
||||||
|
index 2727733..06d0274 100644
|
||||||
|
--- a/parser/html/nsHtml5TreeOperation.h
|
||||||
|
+++ b/parser/html/nsHtml5TreeOperation.h
|
||||||
|
@@ -435,6 +435,15 @@ class nsHtml5TreeOperation {
|
||||||
|
mFour.integer = aInt;
|
||||||
|
}
|
||||||
|
|
||||||
|
+ inline void Init(nsresult aRv)
|
||||||
|
+ {
|
||||||
|
+ NS_PRECONDITION(mOpCode == eTreeOpUninitialized,
|
||||||
|
+ "Op code must be uninitialized when initializing.");
|
||||||
|
+ NS_PRECONDITION(NS_FAILED(aRv), "Initialized tree op with non-failure.");
|
||||||
|
+ mOpCode = eTreeOpMarkAsBroken;
|
||||||
|
+ mOne.result = aRv;
|
||||||
|
+ }
|
||||||
|
+
|
||||||
|
inline void InitAddClass(nsIContentHandle* aNode, const char16_t* aClass)
|
||||||
|
{
|
||||||
|
NS_PRECONDITION(mOpCode == eTreeOpUninitialized,
|
||||||
|
@@ -487,11 +496,12 @@ class nsHtml5TreeOperation {
|
||||||
|
nsIAtom* atom;
|
||||||
|
nsHtml5HtmlAttributes* attributes;
|
||||||
|
nsHtml5DocumentMode mode;
|
||||||
|
- char16_t* unicharPtr;
|
||||||
|
+ char16_t* unicharPtr;
|
||||||
|
char* charPtr;
|
||||||
|
nsHtml5TreeOperationStringPair* stringPair;
|
||||||
|
nsAHtml5TreeBuilderState* state;
|
||||||
|
int32_t integer;
|
||||||
|
+ nsresult result;
|
||||||
|
} mOne, mTwo, mThree, mFour;
|
||||||
|
};
|
||||||
|
|
|
@ -0,0 +1,154 @@
|
||||||
|
commit a58cea744ac5b93b99a66554e1029b2c7aa3255d
|
||||||
|
Author: Matthew Gregan <kinetik@flim.org>
|
||||||
|
Date: Tue Nov 11 08:58:52 2014 +1300
|
||||||
|
|
||||||
|
Bug 1085175. r=roc, a=dveditz
|
||||||
|
|
||||||
|
Modified content/media/MediaCache.cpp
|
||||||
|
diff --git a/content/media/MediaCache.cpp b/content/media/MediaCache.cpp
|
||||||
|
index 598d905..c99f724 100644
|
||||||
|
--- a/content/media/MediaCache.cpp
|
||||||
|
+++ b/content/media/MediaCache.cpp
|
||||||
|
@@ -1174,6 +1174,7 @@ MediaCache::Update()
|
||||||
|
// Figure out where we should be reading from. It's the first
|
||||||
|
// uncached byte after the current mStreamOffset.
|
||||||
|
int64_t dataOffset = stream->GetCachedDataEndInternal(stream->mStreamOffset);
|
||||||
|
+ MOZ_ASSERT(dataOffset >= 0);
|
||||||
|
|
||||||
|
// Compute where we'd actually seek to to read at readOffset
|
||||||
|
int64_t desiredOffset = dataOffset;
|
||||||
|
@@ -1702,6 +1703,7 @@ MediaCacheStream::NotifyDataStarted(int64_t aOffset)
|
||||||
|
ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
|
||||||
|
NS_WARN_IF_FALSE(aOffset == mChannelOffset,
|
||||||
|
"Server is giving us unexpected offset");
|
||||||
|
+ MOZ_ASSERT(aOffset >= 0);
|
||||||
|
mChannelOffset = aOffset;
|
||||||
|
if (mStreamLength >= 0) {
|
||||||
|
// If we started reading at a certain offset, then for sure
|
||||||
|
@@ -2118,23 +2120,28 @@ MediaCacheStream::Seek(int32_t aWhence, int64_t aOffset)
|
||||||
|
return NS_ERROR_FAILURE;
|
||||||
|
|
||||||
|
int64_t oldOffset = mStreamOffset;
|
||||||
|
+ int64_t newOffset = mStreamOffset;
|
||||||
|
switch (aWhence) {
|
||||||
|
case PR_SEEK_END:
|
||||||
|
if (mStreamLength < 0)
|
||||||
|
return NS_ERROR_FAILURE;
|
||||||
|
- mStreamOffset = mStreamLength + aOffset;
|
||||||
|
+ newOffset = mStreamLength + aOffset;
|
||||||
|
break;
|
||||||
|
case PR_SEEK_CUR:
|
||||||
|
- mStreamOffset += aOffset;
|
||||||
|
+ newOffset += aOffset;
|
||||||
|
break;
|
||||||
|
case PR_SEEK_SET:
|
||||||
|
- mStreamOffset = aOffset;
|
||||||
|
+ newOffset = aOffset;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
NS_ERROR("Unknown whence");
|
||||||
|
return NS_ERROR_FAILURE;
|
||||||
|
}
|
||||||
|
|
||||||
|
+ if (newOffset < 0)
|
||||||
|
+ return NS_ERROR_FAILURE;
|
||||||
|
+ mStreamOffset = newOffset;
|
||||||
|
+
|
||||||
|
CACHE_LOG(PR_LOG_DEBUG, ("Stream %p Seek to %lld", this, (long long)mStreamOffset));
|
||||||
|
gMediaCache->NoteSeek(this, oldOffset);
|
||||||
|
|
||||||
|
@@ -2176,11 +2183,10 @@ MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
size = std::min(size, bytesRemaining);
|
||||||
|
- // Clamp size until 64-bit file size issues (bug 500784) are fixed.
|
||||||
|
+ // Clamp size until 64-bit file size issues are fixed.
|
||||||
|
size = std::min(size, int64_t(INT32_MAX));
|
||||||
|
}
|
||||||
|
|
||||||
|
- int32_t bytes;
|
||||||
|
int32_t cacheBlock = streamBlock < mBlocks.Length() ? mBlocks[streamBlock] : -1;
|
||||||
|
if (cacheBlock < 0) {
|
||||||
|
// We don't have a complete cached block here.
|
||||||
|
@@ -2208,7 +2214,10 @@ MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
|
||||||
|
// We can just use the data in mPartialBlockBuffer. In fact we should
|
||||||
|
// use it rather than waiting for the block to fill and land in
|
||||||
|
// the cache.
|
||||||
|
- bytes = std::min<int64_t>(size, streamWithPartialBlock->mChannelOffset - mStreamOffset);
|
||||||
|
+ int64_t bytes = std::min<int64_t>(size, streamWithPartialBlock->mChannelOffset - mStreamOffset);
|
||||||
|
+ // Clamp bytes until 64-bit file size issues are fixed.
|
||||||
|
+ bytes = std::min(bytes, int64_t(INT32_MAX));
|
||||||
|
+ NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= aCount, "Bytes out of range.");
|
||||||
|
memcpy(aBuffer,
|
||||||
|
reinterpret_cast<char*>(streamWithPartialBlock->mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes);
|
||||||
|
if (mCurrentMode == MODE_METADATA) {
|
||||||
|
@@ -2232,6 +2241,7 @@ MediaCacheStream::Read(char* aBuffer, uint32_t aCount, uint32_t* aBytes)
|
||||||
|
gMediaCache->NoteBlockUsage(this, cacheBlock, mCurrentMode, TimeStamp::Now());
|
||||||
|
|
||||||
|
int64_t offset = cacheBlock*BLOCK_SIZE + offsetInStreamBlock;
|
||||||
|
+ int32_t bytes;
|
||||||
|
NS_ABORT_IF_FALSE(size >= 0 && size <= INT32_MAX, "Size out of range.");
|
||||||
|
nsresult rv = gMediaCache->ReadCacheFile(offset, aBuffer + count, int32_t(size), &bytes);
|
||||||
|
if (NS_FAILED(rv)) {
|
||||||
|
@@ -2268,9 +2278,7 @@ MediaCacheStream::ReadAt(int64_t aOffset, char* aBuffer,
|
||||||
|
}
|
||||||
|
|
||||||
|
nsresult
|
||||||
|
-MediaCacheStream::ReadFromCache(char* aBuffer,
|
||||||
|
- int64_t aOffset,
|
||||||
|
- int64_t aCount)
|
||||||
|
+MediaCacheStream::ReadFromCache(char* aBuffer, int64_t aOffset, int64_t aCount)
|
||||||
|
{
|
||||||
|
ReentrantMonitorAutoEnter mon(gMediaCache->GetReentrantMonitor());
|
||||||
|
if (mClosed)
|
||||||
|
@@ -2292,7 +2300,7 @@ MediaCacheStream::ReadFromCache(char* aBuffer,
|
||||||
|
return NS_ERROR_FAILURE;
|
||||||
|
}
|
||||||
|
size = std::min(size, bytesRemaining);
|
||||||
|
- // Clamp size until 64-bit file size issues (bug 500784) are fixed.
|
||||||
|
+ // Clamp size until 64-bit file size issues are fixed.
|
||||||
|
size = std::min(size, int64_t(INT32_MAX));
|
||||||
|
}
|
||||||
|
|
||||||
|
@@ -2303,7 +2311,10 @@ MediaCacheStream::ReadFromCache(char* aBuffer,
|
||||||
|
// We can just use the data in mPartialBlockBuffer. In fact we should
|
||||||
|
// use it rather than waiting for the block to fill and land in
|
||||||
|
// the cache.
|
||||||
|
- bytes = std::min<int64_t>(size, mChannelOffset - streamOffset);
|
||||||
|
+ // Clamp bytes until 64-bit file size issues are fixed.
|
||||||
|
+ int64_t toCopy = std::min<int64_t>(size, mChannelOffset - streamOffset);
|
||||||
|
+ bytes = std::min(toCopy, int64_t(INT32_MAX));
|
||||||
|
+ NS_ABORT_IF_FALSE(bytes >= 0 && bytes <= toCopy, "Bytes out of range.");
|
||||||
|
memcpy(aBuffer + count,
|
||||||
|
reinterpret_cast<char*>(mPartialBlockBuffer.get()) + offsetInStreamBlock, bytes);
|
||||||
|
} else {
|
||||||
|
Modified media/libnestegg/include/nestegg-stdint.h
|
||||||
|
diff --git a/media/libnestegg/include/nestegg-stdint.h b/media/libnestegg/include/nestegg-stdint.h
|
||||||
|
index 599a7a5..c315991 100644
|
||||||
|
--- a/media/libnestegg/include/nestegg-stdint.h
|
||||||
|
+++ b/media/libnestegg/include/nestegg-stdint.h
|
||||||
|
@@ -1,6 +1,9 @@
|
||||||
|
#ifdef _WIN32
|
||||||
|
typedef __int64 int64_t;
|
||||||
|
typedef unsigned __int64 uint64_t;
|
||||||
|
+#if !defined(INT64_MAX)
|
||||||
|
+#define INT64_MAX 9223372036854775807LL
|
||||||
|
+#endif
|
||||||
|
#else
|
||||||
|
#include <stdint.h>
|
||||||
|
#endif
|
||||||
|
Modified media/libnestegg/src/nestegg.c
|
||||||
|
diff --git a/media/libnestegg/src/nestegg.c b/media/libnestegg/src/nestegg.c
|
||||||
|
index 8813cf2..56884d7 100644
|
||||||
|
--- a/media/libnestegg/src/nestegg.c
|
||||||
|
+++ b/media/libnestegg/src/nestegg.c
|
||||||
|
@@ -1950,6 +1950,9 @@ nestegg_offset_seek(nestegg * ctx, uint64_t offset)
|
||||||
|
{
|
||||||
|
int r;
|
||||||
|
|
||||||
|
+ if (offset > INT64_MAX)
|
||||||
|
+ return -1;
|
||||||
|
+
|
||||||
|
/* Seek and set up parser state for segment-level element (Cluster). */
|
||||||
|
r = ne_io_seek(ctx->io, offset, NESTEGG_SEEK_SET);
|
||||||
|
if (r != 0)
|
|
@ -0,0 +1,34 @@
commit 7a8497c0df722b1ed145b99a82c71ed1f7b1d6ce
Author: Markus Stange <mstange@themasta.com>
Date: Thu Oct 9 21:26:27 2014 -0400

Bug 1074280 - Use AsContainerLayer() in order to avoid a bad cast. r=roc, a=bkerensa

Modified gfx/layers/basic/BasicLayerManager.cpp
diff --git a/gfx/layers/basic/BasicLayerManager.cpp b/gfx/layers/basic/BasicLayerManager.cpp
index 5a3a1f6..ff42bc0 100644
--- a/gfx/layers/basic/BasicLayerManager.cpp
+++ b/gfx/layers/basic/BasicLayerManager.cpp
@@ -901,18 +901,17 @@ BasicLayerManager::PaintLayer(gfxContext* aTarget,
RenderTraceScope trace("BasicLayerManager::PaintLayer", "707070");

const nsIntRect* clipRect = aLayer->GetEffectiveClipRect();
- // aLayer might not be a container layer, but if so we take care not to use
- // the container variable
- BasicContainerLayer* container = static_cast<BasicContainerLayer*>(aLayer);
- bool needsGroup = aLayer->GetFirstChild() &&
+ BasicContainerLayer* container =
+ static_cast<BasicContainerLayer*>(aLayer->AsContainerLayer());
+ bool needsGroup = container &&
container->UseIntermediateSurface();
BasicImplData* data = ToData(aLayer);
bool needsClipToVisibleRegion =
data->GetClipToVisibleRegion() && !aLayer->AsThebesLayer();
- NS_ASSERTION(needsGroup || !aLayer->GetFirstChild() ||
+ NS_ASSERTION(needsGroup || !container ||
container->GetOperator() == CompositionOp::OP_OVER,
"non-OVER operator should have forced UseIntermediateSurface");
- NS_ASSERTION(!aLayer->GetFirstChild() || !aLayer->GetMaskLayer() ||
+ NS_ASSERTION(!container || !aLayer->GetMaskLayer() ||
container->UseIntermediateSurface(),
"ContainerLayer with mask layer should force UseIntermediateSurface");
@ -0,0 +1,115 @@
Search for xfce4 panel plugins in the directories specified
in XDG_DATA_DIRS and X_XFCE4_LIB_DIRS. For discussion of the
relevant issues, see:

https://bugzilla.xfce.org/show_bug.cgi?id=5455

Patch by Mark H Weaver <mhw@netris.org>

--- xfce4-panel-4.10.0/panel/panel-module.c.orig 2012-04-28 16:31:35.000000000 -0400
+++ xfce4-panel-4.10.0/panel/panel-module.c 2014-12-14 01:31:55.728107386 -0500
@@ -35,8 +35,14 @@
#include <panel/panel-plugin-external-wrapper.h>
#include <panel/panel-plugin-external-46.h>

-#define PANEL_PLUGINS_LIB_DIR (LIBDIR G_DIR_SEPARATOR_S "panel" G_DIR_SEPARATOR_S "plugins")
-#define PANEL_PLUGINS_LIB_DIR_OLD (LIBDIR G_DIR_SEPARATOR_S "panel-plugins")
+#define PANEL_PLUGINS_LIB_DIR_TAIL (G_DIR_SEPARATOR_S "panel" G_DIR_SEPARATOR_S "plugins")
+#define PANEL_PLUGINS_LIB_DIR_TAIL_OLD (G_DIR_SEPARATOR_S "panel-plugins")
+
+static const gchar *plugins_lib_dir_tails[] =
+{
+ PANEL_PLUGINS_LIB_DIR_TAIL,
+ PANEL_PLUGINS_LIB_DIR_TAIL_OLD
+};


typedef enum _PanelModuleRunMode PanelModuleRunMode;
@@ -335,21 +341,39 @@
/* show a messsage if the old module path key still exists */
g_message ("Plugin %s: The \"X-XFCE-Module-Path\" key is "
"ignored in \"%s\", the panel will look for the "
- "module in %s. See bug #5455 why this decision was made",
- name, filename, PANEL_PLUGINS_LIB_DIR);
+ "module in DIR%s for each DIR in $X_XFCE4_LIB_DIRS "
+ "(%s by default). See bug #5455 for discussion.",
+ name, filename, PANEL_PLUGINS_LIB_DIR_TAIL, LIBDIR);
}
#endif

- path = g_module_build_path (PANEL_PLUGINS_LIB_DIR, module_name);
- found = g_file_test (path, G_FILE_TEST_EXISTS);
+ /* search for module */
+ {
+ gchar *dirs_string;
+ gchar **dirs;
+ int i, j;
+
+ dirs_string = (gchar *) g_getenv ("X_XFCE4_LIB_DIRS");
+ if (!dirs_string)
+ dirs_string = LIBDIR;
+ dirs = g_strsplit (dirs_string, G_SEARCHPATH_SEPARATOR_S, 0);
+
+ found = FALSE;
+ path = NULL;
+
+ for (i = 0; !found && dirs[i] != NULL; i++)
+ for (j = 0; !found && j < G_N_ELEMENTS (plugins_lib_dir_tails); j++)
+ {
+ gchar *dir = g_strconcat (dirs[i], plugins_lib_dir_tails[j], NULL);
+
+ g_free (path);
+ path = g_module_build_path (dir, module_name);
+ found = g_file_test (path, G_FILE_TEST_EXISTS);
+ g_free (dir);
+ }

- if (!found)
- {
- /* deprecated location for module plugin directories */
- g_free (path);
- path = g_module_build_path (PANEL_PLUGINS_LIB_DIR_OLD, module_name);
- found = g_file_test (path, G_FILE_TEST_EXISTS);
- }
+ g_strfreev (dirs);
+ }

if (G_LIKELY (found))
{
--- xfce4-panel-4.10.0/panel/panel-module-factory.c.orig 2012-04-28 16:31:35.000000000 -0400
+++ xfce4-panel-4.10.0/panel/panel-module-factory.c 2014-12-13 23:55:27.439404812 -0500
@@ -42,6 +42,11 @@
#define PANEL_PLUGINS_DATA_DIR (DATADIR G_DIR_SEPARATOR_S "panel" G_DIR_SEPARATOR_S "plugins")
#define PANEL_PLUGINS_DATA_DIR_OLD (DATADIR G_DIR_SEPARATOR_S "panel-plugins")

+static const gchar *plugins_data_dir_tails[] =
+{
+ (G_DIR_SEPARATOR_S "xfce4" G_DIR_SEPARATOR_S "panel" G_DIR_SEPARATOR_S "plugins"),
+ (G_DIR_SEPARATOR_S "xfce4" G_DIR_SEPARATOR_S "panel-plugins")
+};


static void panel_module_factory_finalize (GObject *object);
@@ -223,8 +228,22 @@
panel_module_factory_load_modules (PanelModuleFactory *factory,
gboolean warn_if_known)
{
+ const gchar * const * system_data_dirs;
+ int i, j;
+
panel_return_if_fail (PANEL_IS_MODULE_FACTORY (factory));

+ system_data_dirs = g_get_system_data_dirs ();
+ for (i = 0; system_data_dirs[i] != NULL; i++)
+ for (j = 0; j < G_N_ELEMENTS (plugins_data_dir_tails); j++)
+ {
+ gchar *dir;
+
+ dir = g_strconcat (system_data_dirs[i], plugins_data_dir_tails[j], NULL);
+ panel_module_factory_load_modules_dir (factory, dir, warn_if_known);
+ g_free (dir);
+ }
+
/* load from the new and old location */
panel_module_factory_load_modules_dir (factory, PANEL_PLUGINS_DATA_DIR, warn_if_known);
panel_module_factory_load_modules_dir (factory, PANEL_PLUGINS_DATA_DIR_OLD, warn_if_known);
@ -74,6 +74,27 @@
(home-page "http://www.perl.org/")
(license gpl1+))) ; or "Artistic"

(define-public perl-clone
(package
(name "perl-clone")
(version "0.37")
(source (origin
(method url-fetch)
(uri (string-append "mirror://cpan/authors/id/G/GA/GARU/"
"Clone-" version ".tar.gz"))
(sha256
(base32
"17fdhxpzrq2nwim3zkcrz4m9gjixp0i886yz54ysrshxy3k53wnr"))))
(build-system perl-build-system)
(synopsis "Recursively copy Perl datatypes")
(description
"This module provides a clone() method which makes recursive copies of
nested hash, array, scalar and reference types, including tied variables and
objects.")
(home-page (string-append "http://search.cpan.org/~garu/"
"Clone-" version))
(license (package-license perl))))

(define-public perl-file-list
(package
(name "perl-file-list")

@ -253,6 +274,54 @@ Perlish API and none of the bloat and rarely used features of IPC::Run.")
;; licenses, any version."
(license (list bsd-3 gpl3+))))

(define-public perl-test-deep
(package
(name "perl-test-deep")
(version "0.114")
(source (origin
(method url-fetch)
(uri (string-append "mirror://cpan/authors/id/R/RJ/RJBS/"
"Test-Deep-" version ".tar.gz"))
(sha256
(base32
"09yr47vw7vj27sdik312x08938higcij8ybyq8k67mlccx8cpqf0"))))
(build-system perl-build-system)
(inputs `(("perl-test-tester" ,perl-test-tester)
("perl-test-nowarnings" ,perl-test-nowarnings)))
(synopsis "Flexible deep comparison for the Test::Builder framework")
(description
"Test::Deep compares two structures by going through each level, ensuring
that the values match, that arrays and hashes have the same elements and that
references are blessed into the correct class.  It also handles circular data
structures without getting caught in an infinite loop.")
(home-page (string-append "http://search.cpan.org/~rjbs/"
"Test-Deep-" version))
(license gpl1+))) ; or "Artistic License"

(define-public perl-test-nowarnings
(package
(name "perl-test-nowarnings")
(version "1.04")
(source (origin
(method url-fetch)
(uri (string-append "mirror://cpan/authors/id/A/AD/ADAMK/"
"Test-NoWarnings-" version ".tar.gz"))
(sha256
(base32
"0v385ch0hzz9naqwdw2az3zdqi15gka76pmiwlgsy6diiijmg2k3"))))
(build-system perl-build-system)
(inputs `(("perl-test-tester" ,perl-test-tester)))
(synopsis "Ensure no warnings are produced while testing")
(description
"This modules causes any warnings during testing to be captured and
stored.  It automatically adds an extra test that will run when your script
ends to check that there were no warnings.  If there were any warings, the
test will fail and output diagnostics of where, when and what the warning was,
including a stack trace of what was going on when it occurred.")
(home-page (string-append "http://search.cpan.org/~adamk/"
"Test-NoWarnings-" version))
(license lgpl2.1)))

(define-public perl-test-script
(package
(name "perl-test-script")

@ -277,6 +346,46 @@ bin as is also commonly used) paths of your Perl distribution.")
"Test-Script-" version))
(license (package-license perl))))

(define-public perl-test-simple
(package
(name "perl-test-simple")
(version "1.001009")
(source (origin
(method url-fetch)
(uri (string-append "mirror://cpan/authors/id/E/EX/EXODIST/"
"Test-Simple-" version ".tar.gz"))
(sha256
(base32
"1klxpy658aj1pmrw63j1hc16gilwh5rzhp9rb2d1iydi3hcm8xb5"))))
(build-system perl-build-system)
(synopsis "Basic utilities for writing tests")
(description
"Test::Simple contains basic utilities for writing tests.")
(home-page (string-append "http://search.cpan.org/~exodist/"
"Test-Simple-" version))
(license (package-license perl))))

(define-public perl-test-tester
(package
(name "perl-test-tester")
(version "0.109")
(source (origin
(method url-fetch)
(uri (string-append "mirror://cpan/authors/id/F/FD/FDALY/"
"Test-Tester-" version ".tar.gz"))
(sha256
(base32
"0m9n28z09kq455r5nydj1bnr85lvmbfpcbjdkjfbpmfb5xgciiyk"))))
(build-system perl-build-system)
(synopsis "Simplify running Test::Builder tests")
(description
"Test::Tester allows testing of test modules based on Test::Builder with
a minimum of effort.")
(home-page (string-append "http://search.cpan.org/~fdaly/"
"Test-Tester-" version))
;; "Under the same license as Perl itself"
(license (package-license perl))))

(define-public perl-file-which
(package
(name "perl-file-which")
@ -37,6 +37,7 @@
#:use-module (gnu packages openssl)
#:use-module (gnu packages elf)
#:use-module (gnu packages maths)
#:use-module (gnu packages ncurses)
#:use-module (gnu packages gcc)
#:use-module (gnu packages pkg-config)
#:use-module (gnu packages databases)
@ -50,6 +51,9 @@
#:use-module (gnu packages fontutils)
#:use-module (gnu packages which)
#:use-module (gnu packages perl)
#:use-module (gnu packages xorg)
#:use-module (gnu packages glib)
#:use-module (gnu packages gtk)
#:use-module (guix packages)
#:use-module (guix download)
#:use-module (guix git-download)
@ -614,6 +618,43 @@ get the local timezone information, unless you know the zoneinfo name, and
under several distributions that's hard or impossible to figure out.")
(license cc0)))

(define-public python-pysam
(package
(name "python-pysam")
(version "0.8.1")
(source
(origin
(method url-fetch)
(uri (string-append "https://pypi.python.org/packages/source/p/pysam/pysam-"
version ".tar.gz"))
(sha256
(base32
"1fb6i6hbpzxaxb62kyyp5alaidwhj40f7c6gwbhr6njzlqd5l459"))))
(build-system python-build-system)
(arguments
`(#:tests? #f ; tests are excluded in the manifest
#:phases
(alist-cons-before
'build 'set-flags
(lambda _
(setenv "LDFLAGS" "-lncurses")
(setenv "CFLAGS" "-D_CURSES_LIB=1"))
%standard-phases)))
(inputs
`(("python-cython" ,python-cython)
("python-setuptools" ,python-setuptools)
("ncurses" ,ncurses)
("zlib" ,zlib)))
(home-page "https://github.com/pysam-developers/pysam")
(synopsis "Python bindings to the SAMtools C API")
(description
"Pysam is a Python module for reading and manipulating files in the
SAM/BAM format.  Pysam is a lightweight wrapper of the SAMtools C API.  It
also includes an interface for tabix.")
(license expat)))

(define-public python2-pysam
(package-with-python2 python-pysam))

(define-public python2-pysqlite
(package
|
||||||
"0m6v9nwdldlwk22gcd339zg6mny5m301fxgks7z8sb8m9wawg8qp"))))
|
"0m6v9nwdldlwk22gcd339zg6mny5m301fxgks7z8sb8m9wawg8qp"))))
|
||||||
(build-system python-build-system)
|
(build-system python-build-system)
|
||||||
(outputs '("out" "doc"))
|
(outputs '("out" "doc"))
|
||||||
|
(propagated-inputs ; the following packages are all needed at run time
|
||||||
|
`(("python-pyparsing" ,python-pyparsing)
|
||||||
|
("python-pygobject" ,python-pygobject)
|
||||||
|
("gobject-introspection" ,gobject-introspection)
|
||||||
|
;; The 'gtk+' package (and 'gdk-pixbuf', 'atk' and 'pango' propagated
|
||||||
|
;; from 'gtk+') provides the required 'typelib' files used by
|
||||||
|
;; 'gobject-introspection'. The location of these files is set with the
|
||||||
|
;; help of the environment variable GI_TYPELIB_PATH. At build time this
|
||||||
|
;; is done automatically by a 'native-search-path' procedure. However,
|
||||||
|
;; at run-time the user must set this variable as follows:
|
||||||
|
;;
|
||||||
|
;; export GI_TYPELIB_PATH=~/.guix-profile/lib/girepository-1.0
|
||||||
|
;;
|
||||||
|
;; 'typelib' files include references to dynamic libraries. Currently
|
||||||
|
;; the references do not include the full path to the libraries. For
|
||||||
|
;; this reason the user must set the LD_LIBRARY_PATH to the location of
|
||||||
|
;; 'libgtk-3.so.0', 'libgdk-3.so.0' and 'libatk-1.0.so.0':
|
||||||
|
;;
|
||||||
|
;; export LD_LIBRARY_PATH=~/.guix-profile/lib
|
||||||
|
("gtk+" ,gtk+)
|
||||||
|
;; From version 1.4.0 'matplotlib' makes use of 'cairocffi' instead of
|
||||||
|
;; 'pycairo'. However, 'pygobject' makes use of a 'pycairo' 'context'
|
||||||
|
;; object. For this reason we need to import both libraries.
|
||||||
|
;; https://pythonhosted.org/cairocffi/cffi_api.html#converting-pycairo
|
||||||
|
("python-pycairo" ,python-pycairo)
|
||||||
|
("python-cairocffi" ,python-cairocffi)))
|
||||||
(inputs
|
(inputs
|
||||||
`(("python-setuptools" ,python-setuptools)
|
`(("python-setuptools" ,python-setuptools)
|
||||||
("python-dateutil" ,python-dateutil-2)
|
("python-dateutil" ,python-dateutil-2)
|
||||||
("python-pyparsing" ,python-pyparsing)
|
|
||||||
("python-six" ,python-six)
|
("python-six" ,python-six)
|
||||||
("python-pytz" ,python-pytz)
|
("python-pytz" ,python-pytz)
|
||||||
("python-numpy" ,python-numpy-bootstrap)
|
("python-numpy" ,python-numpy-bootstrap)
|
||||||
|
@ -2131,10 +2197,10 @@ that client code uses to construct the grammar directly in Python code.")
|
||||||
("libpng" ,libpng)
|
("libpng" ,libpng)
|
||||||
("imagemagick" ,imagemagick)
|
("imagemagick" ,imagemagick)
|
||||||
("freetype" ,freetype)
|
("freetype" ,freetype)
|
||||||
|
("cairo" ,cairo)
|
||||||
|
("glib" ,glib)
|
||||||
|
("python-pillow" ,python-pillow)
|
||||||
;; FIXME: Add backends when available.
|
;; FIXME: Add backends when available.
|
||||||
;("python-pygtk" ,python-pygtk)
|
|
||||||
;("python-pycairo" ,python-pycairo)
|
|
||||||
;("python-pygobject" ,python-pygobject)
|
|
||||||
;("python-wxpython" ,python-wxpython)
|
;("python-wxpython" ,python-wxpython)
|
||||||
;("python-pyqt" ,python-pyqt)
|
;("python-pyqt" ,python-pyqt)
|
||||||
))
|
))
|
||||||
|
@@ -2144,40 +2210,51 @@ that client code uses to construct the grammar directly in Python code.")
       ("texinfo" ,texinfo)))
    (arguments
     `(#:phases
       (alist-cons-before
        'build 'configure-environment
        (lambda* (#:key outputs inputs #:allow-other-keys)
          (let ((cairo (assoc-ref inputs "cairo"))
                (gtk+  (assoc-ref inputs "gtk+")))
            ;; Setting these directories in the 'basedirlist' of 'setup.cfg'
            ;; has no effect.
            ;;
            ;; FIXME: setting LD_LIBRARY_PATH should be removed once we patch
            ;; gobject-introspection to include the full path of shared
            ;; libraries in 'typelib' files.
            (setenv "LD_LIBRARY_PATH"
                    (string-append cairo "/lib:" gtk+ "/lib"))
            (setenv "HOME" (getcwd))
            (call-with-output-file "setup.cfg"
              (lambda (port)
                (format port "[rc_options]~%
backend = GTK3Agg~%")))))
        (alist-cons-after
         'install 'install-doc
         (lambda* (#:key outputs #:allow-other-keys)
           (let* ((data (string-append (assoc-ref outputs "doc") "/share"))
                  (doc  (string-append data "/doc/" ,name "-" ,version))
                  (info (string-append data "/info"))
                  (html (string-append doc "/html")))
             (with-directory-excursion "doc"
               ;; Without setting this variable we get an encoding error.
               (setenv "LANG" "en_US.UTF-8")
               ;; Produce pdf in 'A4' format.
               (substitute* (find-files "." "conf\\.py")
                 (("latex_paper_size = 'letter'")
                  "latex_paper_size = 'a4'"))
               (mkdir-p html)
               (mkdir-p info)
               ;; The doc recommends running the 'html' target twice.
               (system* "python" "make.py" "html")
               (system* "python" "make.py" "html")
               (system* "python" "make.py" "latex")
               (system* "python" "make.py" "texinfo")
               (copy-file "build/texinfo/matplotlib.info"
                          (string-append info "/matplotlib.info"))
               (copy-file "build/latex/Matplotlib.pdf"
                          (string-append doc "/Matplotlib.pdf"))
               (copy-recursively "build/html" html))))
         %standard-phases))))
    (home-page "http://matplotlib.org")
    (synopsis "2D plotting library for Python")
    (description
@@ -2193,9 +2270,17 @@ toolkits.")
  (package (inherit matplotlib)
    ;; Make sure we use exactly PYTHON2-NUMPYDOC, which is
    ;; customized for Python 2.
    (propagated-inputs
     `(("python2-py2cairo" ,python2-py2cairo)
       ("python2-pygobject-2" ,python2-pygobject-2)
       ,@(alist-delete "python-pycairo"
                       (alist-delete "python-pygobject"
                                     (package-propagated-inputs
                                      matplotlib)))))
    (inputs
     `(("python2-numpydoc" ,python2-numpydoc)
       ,@(alist-delete "python-numpydoc"
                       (package-inputs matplotlib)))))))

;; Scipy 0.14.0 with Numpy 1.9.X fails several tests.  This is known and
;; planned to be fixed in 0.14.1.  It is claimed that the failures can safely
@@ -2542,3 +2627,102 @@ a front-end for C compilers or analysis tools.")
(define-public python2-cffi
  (package-with-python2 python-cffi))

(define-public python-xcffib
  (package
    (name "python-xcffib")
    (version "0.1.9")
    (source
     (origin
       (method url-fetch)
       (uri (string-append "https://pypi.python.org/packages/source/x/"
                           "xcffib/xcffib-" version ".tar.gz"))
       (sha256
        (base32
         "0655hzxv57h1a9ja9kwp0ichbkhf3djw32k33d66xp0q37dq2y81"))))
    (build-system python-build-system)
    (inputs
     `(("libxcb" ,libxcb)
       ("python-six" ,python-six)))
    (native-inputs
     `(("python-setuptools" ,python-setuptools)))
    (propagated-inputs
     `(("python-cffi" ,python-cffi))) ; used at run time
    (arguments
     `(#:phases
       (alist-cons-after
        'install 'install-doc
        (lambda* (#:key outputs #:allow-other-keys)
          (let ((doc (string-append (assoc-ref outputs "out") "/share"
                                    "/doc/" ,name "-" ,version)))
            (mkdir-p doc)
            (copy-file "README.md"
                       (string-append doc "/README.md"))))
        %standard-phases)))
    (home-page "https://github.com/tych0/xcffib")
    (synopsis "XCB Python bindings")
    (description
     "Xcffib is a replacement for xpyb, an XCB Python binding.  It adds
support for Python 3 and PyPy.  It is based on cffi.")
    (license expat)))

(define-public python2-xcffib
  (package-with-python2 python-xcffib))

(define-public python-cairocffi
  (package
    (name "python-cairocffi")
    (version "0.6")
    (source
     (origin
       (method url-fetch)
       ;; The archive on pypi is missing the 'utils' directory!
       (uri (string-append "https://github.com/SimonSapin/cairocffi/archive/v"
                           version ".tar.gz"))
       (sha256
        (base32
         "03w5p62sp3nqiccx864sbq0jvh7946277jqx3rcc3dch5xwfvv51"))))
    (build-system python-build-system)
    (outputs '("out" "doc"))
    (inputs
     `(("gdk-pixbuf" ,gdk-pixbuf)
       ("cairo" ,cairo)))
    (native-inputs
     `(("pkg-config" ,pkg-config)
       ("python-sphinx" ,python-sphinx)
       ("python-docutils" ,python-docutils)
       ("python-setuptools" ,python-setuptools)))
    (propagated-inputs
     `(("python-xcffib" ,python-xcffib))) ; used at run time
    (arguments
     `(#:phases
       (alist-cons-after
        'install 'install-doc
        (lambda* (#:key inputs outputs #:allow-other-keys)
          (let* ((data (string-append (assoc-ref outputs "doc") "/share"))
                 (doc  (string-append data "/doc/" ,name "-" ,version))
                 (html (string-append doc "/html")))
            (setenv "LD_LIBRARY_PATH"
                    (string-append (assoc-ref inputs "cairo") "/lib" ":"
                                   (assoc-ref inputs "gdk-pixbuf") "/lib"))
            (setenv "LANG" "en_US.UTF-8")
            (mkdir-p html)
            (for-each (lambda (file)
                        (copy-file (string-append "." file)
                                   (string-append doc file)))
                      '("/README.rst" "/CHANGES" "/LICENSE"))
            (system* "python" "setup.py" "build_sphinx")
            (copy-recursively "docs/_build/html" html)))
        %standard-phases)))
    (home-page "https://github.com/SimonSapin/cairocffi")
    (synopsis "Python bindings and object-oriented API for Cairo")
    (description
     "Cairocffi is a CFFI-based drop-in replacement for Pycairo, a set of
Python bindings and object-oriented API for cairo.  Cairo is a 2D vector
graphics library with support for multiple backends including image buffers,
PNG, PostScript, PDF, and SVG file output.")
    (license bsd-3)))

(define-public python2-cairocffi
  (package-with-python2 python-cairocffi))
@@ -42,14 +42,14 @@
  ;; This is QEMU without GUI support.
  (package
    (name "qemu-headless")
-   (version "2.0.0")
+   (version "2.2.0")
    (source (origin
             (method url-fetch)
             (uri (string-append "http://wiki.qemu-project.org/download/qemu-"
                                 version ".tar.bz2"))
             (sha256
              (base32
-              "0frsahiw56jr4cqr9m6s383lyj4ar9hfs2wp3y4yr76krah1mk30"))))
+              "1703c3scl5n07gmpilg7g2xzyxnr7jczxgx6nn4m8kv9gin9p35n"))))
    (build-system gnu-build-system)
    (arguments
     '(#:phases (alist-replace
@@ -28,7 +28,7 @@
(define-public tmux
  (package
    (name "tmux")
-   (version "1.7")
+   (version "1.9a")
    (source (origin
             (method url-fetch)
             (uri (string-append
@@ -36,7 +36,7 @@
                   version "/tmux-" version ".tar.gz"))
             (sha256
              (base32
-              "0ywy1x2g905hmhkdz418ik42lcvnhnwr8fv63rcqczfg27d6nd38"))))
+              "1x9k4wfd4l5jg6fh7xkr3yyilizha6ka8m5b1nr0kw8wj0mv5qy5"))))
    (build-system gnu-build-system)
    (inputs
     `(("libevent" ,libevent)
@@ -395,7 +395,7 @@ SVCD, DVD, 3ivx, DivX 3/4/5, WMV and H.264 movies.")
(define-public youtube-dl
  (package
    (name "youtube-dl")
-   (version "2014.11.21.1")
+   (version "2014.12.15")
    (source (origin
             (method url-fetch)
             (uri (string-append "http://youtube-dl.org/downloads/"
@@ -403,7 +403,7 @@ SVCD, DVD, 3ivx, DivX 3/4/5, WMV and H.264 movies.")
                                 version ".tar.gz"))
             (sha256
              (base32
-              "0rxpx8j4qhhsws6czlfji1x9igsinkbbwvld10qdylll7g9q1v7j"))))
+              "09z7v6jxs4a36kyy681mcypcqsxipplnbdy9s3rva1rpp5f74h2z"))))
    (build-system python-build-system)
    (inputs `(("setuptools" ,python-setuptools)))
    (home-page "http://youtube-dl.org")
@@ -29,6 +29,7 @@
  #:use-module (gnu packages image)
  #:use-module (gnu packages pkg-config)
  #:use-module (gnu packages glib)
  #:use-module (gnu packages perl)
  #:use-module (gnu packages xorg))

;; packages outside the x.org system proper

@@ -57,6 +58,47 @@ can also be used for copying files, as an alternative to sftp/scp, thus
avoiding password prompts when X11 forwarding has already been setup.")
    (license license:gpl2+)))

(define-public xdotool
  (package
    (name "xdotool")
    (version "2.20110530.1")
    (source
     (origin
       (method url-fetch)
       (uri (string-append
             "http://semicomplete.googlecode.com/files/" name "-"
             version ".tar.gz"))
       (sha256
        (base32
         "0rxggg1cy7nnkwidx8x2w3c5f3pk6dh2b6q0q7hp069r3n5jrd77"))))
    (build-system gnu-build-system)
    (arguments
     '(#:tests? #f ; Test suite requires a lot of black magic
       #:phases
       (alist-replace 'configure
                      (lambda* (#:key outputs #:allow-other-keys #:rest args)
                        (setenv "PREFIX" (assoc-ref outputs "out"))
                        (setenv "LDFLAGS" (string-append "-Wl,-rpath="
                                                         (assoc-ref
                                                          %outputs "out") "/lib"))
                        (setenv "CC" "gcc"))
                      %standard-phases)))
    (native-inputs `(("perl" ,perl))) ; for pod2man
    (inputs `(("libx11" ,libx11)
              ("libxext" ,libxext)
              ("libxi" ,libxi)
              ("libxinerama" ,libxinerama)
              ("libxtst" ,libxtst)))
    (home-page "http://www.semicomplete.com/projects/xdotool")
    (synopsis "Fake keyboard/mouse input, window management, and more")
    (description "Xdotool lets you simulate keyboard input and mouse activity,
move and resize windows, etc.  It does this using X11's XTEST extension and
other Xlib functions.  Additionally, you can search for windows and move,
resize, hide, and modify window properties like the title.  If your window
manager supports it, you can use xdotool to switch desktops, move windows
between desktops, and change the number of desktops.")
    (license license:bsd-3)))

(define-public xeyes
  (package
    (name "xeyes")
@@ -1,5 +1,6 @@
;;; GNU Guix --- Functional package management for GNU
;;; Copyright © 2014 Sou Bunnbu <iyzsong@gmail.com>
;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
;;;
;;; This file is part of GNU Guix.
;;;
@@ -22,6 +23,7 @@
  #:use-module (guix download)
  #:use-module (guix utils)
  #:use-module (guix build-system gnu)
  #:use-module (gnu packages)
  #:use-module (gnu packages pkg-config)
  #:use-module (gnu packages glib)
  #:use-module (gnu packages gtk)
@@ -249,7 +251,8 @@ management D-Bus specification.")
                                  "/src/" name "-" version ".tar.bz2"))
              (sha256
               (base32
                "1f8903nx6ivzircl8d8s9zna4vjgfy0qhjk5d2x19g9bmycgj89k"))
              (patches (list (search-patch "xfce4-panel-plugins.patch")))))
    (build-system gnu-build-system)
    (native-inputs
     `(("pkg-config" ,pkg-config)
@@ -261,6 +264,10 @@ management D-Bus specification.")
       ("garcon", garcon)
       ("libwnck" ,libwnck-1)
       ("libxfce4ui" ,libxfce4ui)))
    (native-search-paths
     (list (search-path-specification
            (variable "X_XFCE4_LIB_DIRS")
            (directories '("lib/xfce4")))))
    (home-page "http://www.xfce.org/")
    (synopsis "Xfce desktop panel")
    (description
@@ -269,6 +276,35 @@ applications menu, workspace switcher and more.")
    ;; Libraries are under LGPLv2.1+, and programs under GPLv2+.
    (license (list gpl2+ lgpl2.1+))))

(define-public xfce4-battery-plugin
  (package
    (name "xfce4-battery-plugin")
    (version "1.0.5")
    (source (origin
              (method url-fetch)
              (uri (string-append "http://archive.xfce.org/src/panel-plugins/"
                                  name "/" (version-major+minor version) "/"
                                  name "-" version ".tar.bz2"))
              (sha256
               (base32
                "04gbplcj8z4vg5xbks8cc2jjf62mmf9sdymg90scjwmb82pv2ngn"))))
    (build-system gnu-build-system)
    (native-inputs `(("pkg-config" ,pkg-config)
                     ("intltool" ,intltool)))
    (inputs `(("glib" ,glib)
              ("gtk+" ,gtk+-2)
              ("libxfce4util" ,libxfce4util)
              ("libxfce4ui" ,libxfce4ui)
              ("xfce4-panel" ,xfce4-panel)))
    (home-page
     "http://goodies.xfce.org/projects/panel-plugins/xfce4-battery-plugin")
    (synopsis "Battery monitor panel plugin for Xfce4")
    (description
     "A battery monitor panel plugin for Xfce4, compatible with APM and ACPI.")
    ;; The main plugin code is covered by gpl2+, but the files containing code
    ;; to read the battery state via ACPI or APM are covered by lgpl2.0+.
    (license (list gpl2+ lgpl2.0+))))

(define-public xfce4-appfinder
  (package
    (name "xfce4-appfinder")
@@ -476,3 +512,33 @@ on the screen.")
optional application menu or icons for minimized applications or launchers,
devices and folders.")
    (license gpl2+)))

(define-public xfce4-terminal
  (package
    (name "xfce4-terminal")
    (version "0.6.3")
    (source (origin
              (method url-fetch)
              (uri (string-append "http://archive.xfce.org/src/apps/" name "/"
                                  (version-major+minor version) "/"
                                  name "-" version ".tar.bz2"))
              (sha256
               (base32
                "023y0lkfijifh05yz8grimxadqpi98mrivr00sl18nirq8b4fbwi"))))
    (build-system gnu-build-system)
    (native-inputs
     `(("pkg-config" ,pkg-config)
       ("intltool" ,intltool)))
    (inputs
     `(("libxfce4ui" ,libxfce4ui)
       ("vte" ,vte/gtk+-2)))
    (home-page "http://www.xfce.org/")
    (synopsis "Xfce terminal emulator")
    (description
     "A lightweight and easy to use terminal emulator for Xfce.  Features
include a simple configuration interface, the ability to use multiple tabs
with terminals within a single window, the possibility to have a
pseudo-transparent terminal background, and a compact mode (where both the
menubar and the window decorations are hidden) that helps you to save space
on your desktop.")
    (license gpl2+)))
@@ -33,8 +33,10 @@
                #:select (mount-flags->bit-mask))
  #:use-module (guix gexp)
  #:use-module (guix monads)
  #:use-module (guix records)
  #:use-module (srfi srfi-1)
  #:use-module (srfi srfi-26)
  #:use-module (ice-9 match)
  #:use-module (ice-9 format)
  #:export (root-file-system-service
            file-system-service
@@ -46,6 +48,16 @@
            console-font-service
            udev-service
            mingetty-service

            %nscd-default-caches
            %nscd-default-configuration

            nscd-configuration
            nscd-configuration?

            nscd-cache
            nscd-cache?

            nscd-service
            syslog-service
            guix-service
@@ -374,9 +386,110 @@ the ``message of the day''."
                      #:allow-empty-passwords? allow-empty-passwords?
                      #:motd motd)))))))

-(define* (nscd-service #:key (glibc (canonical-package glibc)))
-  "Return a service that runs libc's name service cache daemon (nscd)."
-  (with-monad %store-monad
+(define-record-type* <nscd-configuration> nscd-configuration
+  make-nscd-configuration
+  nscd-configuration?
+  (log-file    nscd-configuration-log-file        ;string
+               (default "/var/log/nscd.log"))
+  (debug-level nscd-debug-level                   ;integer
+               (default 0))
+  ;; TODO: See nscd.conf in glibc for other options to add.
+  (caches      nscd-configuration-caches          ;list of <nscd-cache>
+               (default %nscd-default-caches)))
+
+(define-record-type* <nscd-cache> nscd-cache make-nscd-cache
+  nscd-cache?
+  (database              nscd-cache-database)              ;symbol
+  (positive-time-to-live nscd-cache-positive-time-to-live) ;integer
+  (negative-time-to-live nscd-cache-negative-time-to-live
+                         (default 20))            ;integer
+  (suggested-size        nscd-cache-suggested-size ;integer ("default module
+                                                   ;of hash table")
+                         (default 211))
+  (check-files?          nscd-cache-check-files?  ;Boolean
+                         (default #t))
+  (persistent?           nscd-cache-persistent?   ;Boolean
+                         (default #t))
+  (shared?               nscd-cache-shared?       ;Boolean
+                         (default #t))
+  (max-database-size     nscd-cache-max-database-size ;integer
+                         (default (* 32 (expt 2 20))))
+  (auto-propagate?       nscd-cache-auto-propagate? ;Boolean
+                         (default #t)))
+
+(define %nscd-default-caches
+  ;; Caches that we want to enable by default.  Note that when providing an
+  ;; empty nscd.conf, all caches are disabled.
+  (list (nscd-cache (database 'hosts)
+
+                    ;; Aggressively cache the host name cache to improve
+                    ;; privacy and resilience.
+                    (positive-time-to-live (* 3600 12))
+                    (negative-time-to-live 20)
+                    (persistent? #t))
+
+        (nscd-cache (database 'services)
+
+                    ;; Services are unlikely to change, so we can be even more
+                    ;; aggressive.
+                    (positive-time-to-live (* 3600 24))
+                    (negative-time-to-live 3600)
+                    (check-files? #t)             ;check /etc/services changes
+                    (persistent? #t))))
+
+(define %nscd-default-configuration
+  ;; Default nscd configuration.
+  (nscd-configuration))
+
+(define (nscd.conf-file config)
+  "Return the @file{nscd.conf} configuration file for @var{config}, an
+@code{<nscd-configuration>} object."
+  (define cache->config
+    (match-lambda
+     (($ <nscd-cache> (= symbol->string database)
+                      positive-ttl negative-ttl size check-files?
+                      persistent? shared? max-size propagate?)
+      (string-append "\nenable-cache\t" database "\tyes\n"
+
+                     "positive-time-to-live\t" database "\t"
+                     (number->string positive-ttl) "\n"
+                     "negative-time-to-live\t" database "\t"
+                     (number->string negative-ttl) "\n"
+                     "suggested-size\t" database "\t"
+                     (number->string size) "\n"
+                     "check-files\t" database "\t"
+                     (if check-files? "yes\n" "no\n")
+                     "persistent\t" database "\t"
+                     (if persistent? "yes\n" "no\n")
+                     "shared\t" database "\t"
+                     (if shared? "yes\n" "no\n")
+                     "max-db-size\t" database "\t"
+                     (number->string max-size) "\n"
+                     "auto-propagate\t" database "\t"
+                     (if propagate? "yes\n" "no\n")))))
+
+  (match config
+    (($ <nscd-configuration> log-file debug-level caches)
+     (text-file "nscd.conf"
+                (string-append "\
+# Configuration of libc's name service cache daemon (nscd).\n\n"
+                               (if log-file
+                                   (string-append "logfile\t" log-file)
+                                   "")
+                               "\n"
+                               (if debug-level
+                                   (string-append "debug-level\t"
+                                                  (number->string debug-level))
+                                   "")
+                               "\n"
+                               (string-concatenate
+                                (map cache->config caches)))))))
+
+(define* (nscd-service #:optional (config %nscd-default-configuration)
+                       #:key (glibc (canonical-package glibc)))
+  "Return a service that runs libc's name service cache daemon (nscd) with the
+given @var{config}---an @code{<nscd-configuration>} object."
+  (mlet %store-monad ((nscd.conf (nscd.conf-file config)))
     (return (service
              (documentation "Run libc's name service cache daemon (nscd).")
              (provision '(nscd))
@@ -388,7 +501,7 @@ the ``message of the day''."

             (start #~(make-forkexec-constructor
                       (list (string-append #$glibc "/sbin/nscd")
-                            "-f" "/dev/null" "--foreground")))
+                            "-f" #$nscd.conf "--foreground")))
             (stop #~(make-kill-destructor))

             (respawn? #f)))))
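As a usage sketch (not part of the change above): an operating system can pass a custom <nscd-configuration> to 'nscd-service'.  The extra 'passwd' cache and its time-to-live values below are invented for illustration only.

;; Illustrative only: enable an extra 'passwd' cache on top of the defaults.
(nscd-service (nscd-configuration
               (debug-level 1)
               (caches (cons (nscd-cache (database 'passwd)
                                         (positive-time-to-live 600)
                                         (negative-time-to-live 20))
                             %nscd-default-caches))))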
@@ -80,60 +80,62 @@ fe80::1%lo0 apps.facebook.com\n")
                                       gateway
                                       (provision '(networking))
                                       (name-servers '())
                                       (net-tools net-tools))
  "Return a service that starts @var{interface} with address @var{ip}.  If
@var{gateway} is true, it must be a string specifying the default network
gateway."
  (define loopback?
    (memq 'loopback provision))

  ;; TODO: Eventually replace 'route' with bindings for the appropriate
  ;; ioctls.
  (with-monad %store-monad
    (return
     (service

      ;; Unless we're providing the loopback interface, wait for udev to be up
      ;; and running so that INTERFACE is actually usable.
      (requirement (if loopback? '() '(udev)))

      (documentation
       "Bring up the networking interface using a static IP address.")
      (provision provision)
      (start #~(lambda _
                 ;; Return #t if successfully started.
                 (let* ((addr     (inet-pton AF_INET #$ip))
                        (sockaddr (make-socket-address AF_INET addr 0)))
                   (configure-network-interface #$interface sockaddr
                                                (logior IFF_UP
                                                        #$(if loopback?
                                                              #~IFF_LOOPBACK
                                                              0))))
                 #$(if gateway
                       #~(zero? (system* (string-append #$net-tools
                                                        "/sbin/route")
                                         "add" "-net" "default"
                                         "gw" #$gateway))
                       #t)
                 #$(if (pair? name-servers)
                       #~(call-with-output-file "/etc/resolv.conf"
                           (lambda (port)
                             (display
                              "# Generated by 'static-networking-service'.\n"
                              port)
                             (for-each (lambda (server)
                                         (format port "nameserver ~a~%"
                                                 server))
                                       '#$name-servers)))
                       #t)))
      (stop #~(lambda _
                ;; Return #f if successfully stopped.
                (let ((sock (socket AF_INET SOCK_STREAM 0)))
                  (set-network-interface-flags sock #$interface 0)
                  (close-port sock))
                (not #$(if gateway
                           #~(system* (string-append #$net-tools
                                                     "/sbin/route")
                                      "del" "-net" "default")
                           #t))))
      (respawn? #f)))))

(define* (dhcp-client-service #:key (dhcp isc-dhcp))
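A hedged sketch of a caller, assuming the procedure takes the interface name and IP address as its two positional arguments, as its docstring suggests; the interface name and addresses below are placeholders:

;; Illustrative only: a statically configured interface with a gateway and
;; one name server, for use in an operating system's service list.
(static-networking-service "eth0" "192.168.1.2"
                           #:gateway "192.168.1.1"
                           #:name-servers '("192.168.1.1"))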
@@ -36,7 +36,7 @@
  #:use-module (srfi srfi-26)
  #:use-module (ice-9 match)
  #:export (xorg-start-command
            %default-xsessions
            %default-slim-theme
            %default-slim-theme-name
            slim-service))
@@ -136,9 +136,10 @@ EndSection
(define* (xinitrc #:key
                  (guile (canonical-package guile-2.0))
                  fallback-session)
  "Return a system-wide xinitrc script that starts the specified X session,
which should be passed to this script as the first argument.  If not, the
@var{fallback-session} will be used."
  (define builder
    #~(begin
        (use-modules (ice-9 match))
@@ -155,20 +156,14 @@ EndSection
             (execl shell shell "--login" "-c"
                    (string-join (cons command args))))))

        (let ((home    (getenv "HOME"))
              (session (match (command-line)
                         ((_ x) x)
                         (_ #$fallback-session))))
          ;; First, try to run ~/.xsession.
          (exec-from-login-shell (string-append home "/.xsession"))
          ;; Then try to start the specified session.
          (exec-from-login-shell session))))

  (gexp->script "xinitrc" builder))
@@ -176,6 +171,35 @@
;;; SLiM log-in manager.
;;;

(define %default-xsessions
  ;; Default xsessions available to the log-in manager, represented as a list
  ;; of monadic desktop entries.
  (list (text-file* "wmaker.desktop" "
[Desktop Entry]
Name=Window Maker
Exec=" windowmaker "/bin/wmaker
Type=Application
")
        (text-file* "ratpoison.desktop" "
[Desktop Entry]
Name=Ratpoison
Exec=" ratpoison "/bin/ratpoison
Type=Application
")))

(define (xsessions-directory sessions)
  "Return a directory containing SESSIONS, which should be a list of monadic
desktop entries."
  (mlet %store-monad ((sessions (sequence %store-monad sessions)))
    (define builder
      #~(begin
          (mkdir #$output)
          (for-each (lambda (session)
                      (symlink session (string-append #$output "/"
                                                      (basename session))))
                    '#$sessions)))
    (gexp->derivation "xsessions-dir" builder)))

(define %default-slim-theme
  ;; Theme based on work by Felipe López.
  #~(string-append #$%artwork-repository "/slim"))
@@ -191,6 +215,9 @@
                       (theme %default-slim-theme)
                       (theme-name %default-slim-theme-name)
                       (xauth xauth) (dmd dmd) (bash bash)
                       (sessions %default-xsessions)
                       (auto-login-session #~(string-append #$windowmaker
                                                            "/bin/wmaker"))
                       startx)
  "Return a service that spawns the SLiM graphical login manager, which in
turn starts the X display server with @var{startx}, a command as returned by
@@ -198,7 +225,7 @@ turn starts the X display server with @var{startx}, a command as returned by

When @var{allow-empty-passwords?} is true, allow logins with an empty
password.  When @var{auto-login?} is true, log in automatically as
-@var{default-user}.
+@var{default-user} with @var{auto-login-session}.

If @var{theme} is @code{#f}, use the default log-in theme; otherwise
@var{theme} must be a gexp denoting the name of a directory containing the
@@ -207,7 +234,9 @@ theme."
  (define (slim.cfg)
    (mlet %store-monad ((startx  (or startx (xorg-start-command)))
-                        (xinitrc (xinitrc)))
+                        (xinitrc (xinitrc #:fallback-session
+                                          auto-login-session))
+                        (sessiondir (xsessions-directory sessions)))
      (text-file* "slim.cfg" "
default_path /run/current-system/profile/bin
default_xserver " startx "
@@ -218,7 +247,7 @@ authfile /var/run/slim.auth

# The login command.  '%session' is replaced by the chosen session name, one
# of the names specified in the 'sessions' setting: 'wmaker', 'xfce', etc.
login_cmd exec " xinitrc " %session
-sessions wmaker,ratpoison
+sessiondir " sessiondir "

halt_cmd " dmd "/sbin/halt
reboot_cmd " dmd "/sbin/reboot
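A sketch of how a caller might extend the session list, following the pattern of %default-xsessions above; 'my-wm' is a hypothetical window manager package, not something defined in this change:

;; Illustrative only: add a custom desktop entry to the sessions offered by
;; SLiM, on top of the defaults.
(slim-service #:sessions
              (cons (text-file* "my-wm.desktop" "
[Desktop Entry]
Name=My WM
Exec=" my-wm "/bin/my-wm
Type=Application
")
                    %default-xsessions))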
@@ -145,6 +145,14 @@ configuration template file in the installation system."
      #~(unless (file-exists? #$local-template)
          (copy-file #$template #$local-template)))))))

(define %nscd-minimal-caches
  ;; Minimal in-memory caching policy for nscd.
  (list (nscd-cache (database 'hosts)
                    (positive-time-to-live (* 3600 12))
                    (negative-time-to-live 20)
                    (persistent? #f)
                    (max-database-size (* 5 (expt 2 20)))))) ;5 MiB

(define (installation-services)
  "Return the list of services for the installation image."
  (let ((motd (text-file "motd" "
@@ -206,7 +214,10 @@ You have been warned.  Thanks for being so brave.
          (console-font-service "tty5")
          (console-font-service "tty6")

-          (nscd-service))))
+          ;; Since this is running on a USB stick with a unionfs as the root
+          ;; file system, use an appropriate cache configuration.
+          (nscd-service (nscd-configuration
+                         (caches %nscd-minimal-caches))))))

(define %issue
  ;; Greeting.
@@ -55,8 +55,7 @@ PYTHON-BUILD-SYSTEM, such that it is compiled with PYTHON instead.  The
inputs are changed recursively accordingly.  If the name of P starts with
OLD-PREFIX, this is replaced by NEW-PREFIX; otherwise, NEW-PREFIX is
prepended to the name."
  (let* ((rewrite-if-package
          (lambda (content)
            ;; CONTENT may be a file name, in which case it is returned, or a
            ;; package, which is rewritten with the new PYTHON and NEW-PREFIX.
@@ -68,28 +67,23 @@ prepended to the name."
          (match-lambda
           ((name content . rest)
            (append (list name (rewrite-if-package content)) rest)))))

    (if (eq? (package-build-system p) python-build-system)
        (package (inherit p)
          (name (let ((name (package-name p)))
                  (string-append new-prefix
                                 (if (string-prefix? old-prefix name)
                                     (substring name (string-length old-prefix))
                                     name))))
          (arguments
           (let ((arguments (package-arguments p)))
             (if (member #:python arguments)
                 (substitute-keyword-arguments arguments ((#:python p) python))
                 (append arguments `(#:python ,python)))))
          (inputs (map rewrite (package-inputs p)))
          (propagated-inputs (map rewrite (package-propagated-inputs p)))
          (native-inputs (map rewrite (package-native-inputs p))))
        p)))

(define package-with-python2
  (cut package-with-explicit-python <> (default-python2) "python-" "python2-"))
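For illustration, the same prefix-rewriting mechanism works with any interpreter.  The 'python-3.3' variable and the resulting package name below are hypothetical, not part of this change:

;; Illustrative only: a variant builder for a specific Python, built with
;; SRFI-26 'cut' just like 'package-with-python2' above.
(define package-with-python-3.3
  (cut package-with-explicit-python <> python-3.3 "python-" "python3.3-"))

(define-public python3.3-six
  (package-with-python-3.3 python-six))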
@@ -105,19 +105,36 @@
                        files)))
            bindirs)))

(define* (rename-pth-file #:key name inputs outputs #:allow-other-keys)
  "Rename easy-install.pth to NAME.pth to avoid conflicts between packages
installed with setuptools."
  (let* ((out (assoc-ref outputs "out"))
         (python (assoc-ref inputs "python"))
         (site-packages (string-append out "/lib/python"
                                       (get-python-version python)
                                       "/site-packages"))
         (easy-install-pth (string-append site-packages "/easy-install.pth"))
         (new-pth (string-append site-packages "/" name ".pth")))
    (when (file-exists? easy-install-pth)
      (rename-file easy-install-pth new-pth))
    #t))

(define %standard-phases
  ;; 'configure' and 'build' phases are not needed.  Everything is done during
  ;; 'install'.
  (alist-cons-before
   'strip 'rename-pth-file
   rename-pth-file
   (alist-cons-after
    'install 'wrap
    wrap
    (alist-replace
     'build build
     (alist-replace
      'check check
      (alist-replace 'install install
                     (alist-delete 'configure
                                   gnu:%standard-phases)))))))

(define* (python-build #:key inputs (phases %standard-phases)
                       #:allow-other-keys #:rest args)
@@ -42,7 +42,11 @@
            all-network-interfaces
            network-interfaces
            network-interface-flags
            loopback-network-interface?
            network-interface-address
            set-network-interface-flags
            set-network-interface-address
            configure-network-interface))

;;; Commentary:
;;;
@@ -228,6 +232,77 @@ user-land process."
                  (scandir "/proc"))
            <))


;;;
;;; Packed structures.
;;;

(define-syntax sizeof*
  ;; XXX: This duplicates 'compile-time-value'.
  (syntax-rules (int128)
    ((_ int128)
     16)
    ((_ type)
     (let-syntax ((v (lambda (s)
                       (let ((val (sizeof type)))
                         (syntax-case s ()
                           (_ val))))))
       v))))

(define-syntax type-size
  (syntax-rules (~)
    ((_ (type ~ order))
     (sizeof* type))
    ((_ type)
     (sizeof* type))))

(define-syntax write-type
  (syntax-rules (~)
    ((_ bv offset (type ~ order) value)
     (bytevector-uint-set! bv offset value
                           (endianness order) (sizeof* type)))
    ((_ bv offset type value)
     (bytevector-uint-set! bv offset value
                           (native-endianness) (sizeof* type)))))

(define-syntax write-types
  (syntax-rules ()
    ((_ bv offset () ())
     #t)
    ((_ bv offset (type0 types ...) (field0 fields ...))
     (begin
       (write-type bv offset type0 field0)
       (write-types bv (+ offset (type-size type0))
                    (types ...) (fields ...))))))

(define-syntax read-type
  (syntax-rules (~)
    ((_ bv offset (type ~ order))
     (bytevector-uint-ref bv offset
                          (endianness order) (sizeof* type)))
    ((_ bv offset type)
     (bytevector-uint-ref bv offset
                          (native-endianness) (sizeof* type)))))

(define-syntax read-types
  (syntax-rules ()
    ((_ bv offset ())
     '())
    ((_ bv offset (type0 types ...))
     (cons (read-type bv offset type0)
           (read-types bv (+ offset (type-size type0)) (types ...))))))

(define-syntax define-c-struct
  (syntax-rules ()
    "Define READ as an optimized deserializer and WRITE! as a serializer for
the C structure with the given TYPES."
    ((_ name read write! (fields types) ...)
     (begin
       (define (write! bv offset fields ...)
         (write-types bv offset (types ...) (fields ...)))
       (define (read bv offset)
         (read-types bv offset (types ...)))))))


;;;
;;; Network interfaces.
;;;
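As a small sketch of the macro above, a hypothetical two-field structure (the 'timeval' name and field values are illustrative only, not part of this change):

;; Illustrative only: describe a struct with two 'long' fields.
(define-c-struct timeval
  read-timeval
  write-timeval!
  (seconds      long)
  (microseconds long))

;; (write-timeval! bv 0 1 500000) serializes both fields at offset 0 of BV,
;; and (read-timeval bv 0) returns the list (1 500000).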
@@ -241,6 +316,18 @@ user-land process."
  (if (string-contains %host-type "linux")
      #x8913                                       ;GNU/Linux
      #xc4804191))                                 ;GNU/Hurd
+(define SIOCSIFFLAGS
+  (if (string-contains %host-type "linux")
+      #x8914                                      ;GNU/Linux
+      -1))                                        ;FIXME: GNU/Hurd?
+(define SIOCGIFADDR
+  (if (string-contains %host-type "linux")
+      #x8915                                      ;GNU/Linux
+      -1))                                        ;FIXME: GNU/Hurd?
+(define SIOCSIFADDR
+  (if (string-contains %host-type "linux")
+      #x8916                                      ;GNU/Linux
+      -1))                                        ;FIXME: GNU/Hurd?

;; Flags and constants from <net/if.h>.
@@ -263,6 +350,56 @@ user-land process."
      40
      32))

(define-c-struct sockaddr-in                      ;<linux/in.h>
  read-sockaddr-in
  write-sockaddr-in!
  (family    unsigned-short)
  (port      (int16 ~ big))
  (address   (int32 ~ big)))

(define-c-struct sockaddr-in6                     ;<linux/in6.h>
  read-sockaddr-in6
  write-sockaddr-in6!
  (family    unsigned-short)
  (port      (int16 ~ big))
  (flowinfo  (int32 ~ big))
  (address   (int128 ~ big))
  (scopeid   int32))

(define (write-socket-address! sockaddr bv index)
  "Write SOCKADDR, a socket address as returned by 'make-socket-address', to
bytevector BV at INDEX."
  (let ((family (sockaddr:fam sockaddr)))
    (cond ((= family AF_INET)
           (write-sockaddr-in! bv index
                               family
                               (sockaddr:port sockaddr)
                               (sockaddr:addr sockaddr)))
          ((= family AF_INET6)
           (write-sockaddr-in6! bv index
                                family
                                (sockaddr:port sockaddr)
                                (sockaddr:flowinfo sockaddr)
                                (sockaddr:addr sockaddr)
                                (sockaddr:scopeid sockaddr)))
          (else
           (error "unsupported socket address" sockaddr)))))

(define (read-socket-address bv index)
  "Read a socket address from bytevector BV at INDEX."
  (let ((family (bytevector-u16-native-ref bv index)))
    (cond ((= family AF_INET)
           (match (read-sockaddr-in bv index)
             ((family port address)
              (make-socket-address family address port))))
          ((= family AF_INET6)
           (match (read-sockaddr-in6 bv index)
             ((family port flowinfo address scopeid)
              (make-socket-address family address port
                                   flowinfo scopeid))))
          (else
           (error "unsupported socket address family" family)))))

(define %ioctl
  ;; The most terrible interface, live from Scheme.
  (pointer->procedure int
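A minimal round-trip sketch through the serializers defined above; the address and port are arbitrary example values:

;; Illustrative only: serialize an IPv4 socket address into a bytevector and
;; read it back.
(let ((bv   (make-bytevector 16 0))
      (addr (make-socket-address AF_INET (inet-pton AF_INET "10.0.0.1") 80)))
  (write-socket-address! addr bv 0)
  (read-socket-address bv 0))          ;=> a sockaddr equivalent to ADDR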
@@ -354,4 +491,65 @@ interface NAME."
    (close-port sock)
    (not (zero? (logand flags IFF_LOOPBACK)))))

(define (set-network-interface-flags socket name flags)
  "Set the flags of network interface NAME to FLAGS."
  (let ((req (make-bytevector ifreq-struct-size)))
    (bytevector-copy! (string->utf8 name) 0 req 0
                      (min (string-length name) (- IF_NAMESIZE 1)))
    ;; Set the 'ifr_flags' field.
    (bytevector-uint-set! req IF_NAMESIZE flags (native-endianness)
                          (sizeof short))
    (let* ((ret (%ioctl (fileno socket) SIOCSIFFLAGS
                        (bytevector->pointer req)))
           (err (errno)))
      (unless (zero? ret)
        (throw 'system-error "set-network-interface-flags"
               "set-network-interface-flags on ~A: ~A"
               (list name (strerror err))
               (list err))))))

(define (set-network-interface-address socket name sockaddr)
  "Set the address of network interface NAME to SOCKADDR."
  (let ((req (make-bytevector ifreq-struct-size)))
    (bytevector-copy! (string->utf8 name) 0 req 0
                      (min (string-length name) (- IF_NAMESIZE 1)))
    ;; Set the 'ifr_addr' field.
    (write-socket-address! sockaddr req IF_NAMESIZE)
    (let* ((ret (%ioctl (fileno socket) SIOCSIFADDR
                        (bytevector->pointer req)))
           (err (errno)))
      (unless (zero? ret)
        (throw 'system-error "set-network-interface-address"
               "set-network-interface-address on ~A: ~A"
               (list name (strerror err))
               (list err))))))

(define (network-interface-address socket name)
  "Return the address of network interface NAME.  The result is an object of
the same type as that returned by 'make-socket-address'."
  (let ((req (make-bytevector ifreq-struct-size)))
    (bytevector-copy! (string->utf8 name) 0 req 0
                      (min (string-length name) (- IF_NAMESIZE 1)))
    (let* ((ret (%ioctl (fileno socket) SIOCGIFADDR
                        (bytevector->pointer req)))
           (err (errno)))
      (if (zero? ret)
          (read-socket-address req IF_NAMESIZE)
          (throw 'system-error "network-interface-address"
                 "network-interface-address on ~A: ~A"
                 (list name (strerror err))
                 (list err))))))

(define (configure-network-interface name sockaddr flags)
  "Configure network interface NAME to use SOCKADDR, an address as returned
by 'make-socket-address', and FLAGS, a bitwise-or of IFF_* constants."
  (let ((sock (socket (sockaddr:fam sockaddr) SOCK_STREAM 0)))
    (dynamic-wind
      (const #t)
      (lambda ()
        (set-network-interface-address sock name sockaddr)
        (set-network-interface-flags sock name flags))
      (lambda ()
        (close-port sock)))))

;;; syscalls.scm ends here
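A direct-call sketch mirroring what 'static-networking-service' now does; "eth0" and the address are placeholders, and the call requires root privileges to succeed:

;; Illustrative only: bring up an interface with a fixed IPv4 address.
(let* ((ip       (inet-pton AF_INET "10.0.2.15"))
       (sockaddr (make-socket-address AF_INET ip 0)))
  (configure-network-interface "eth0" sockaddr IFF_UP))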
@@ -1,5 +1,6 @@
;;; GNU Guix --- Functional package management for GNU
;;; Copyright © 2012, 2013, 2014 Ludovic Courtès <ludo@gnu.org>
;;; Copyright © 2014 Mark H Weaver <mhw@netris.org>
;;;
;;; This file is part of GNU Guix.
;;;
@@ -546,40 +547,38 @@ for the host system (\"native inputs\"), and not target inputs."
recursively."
  (transitive-inputs (package-propagated-inputs package)))

(define-syntax define-memoized/v
  (lambda (form)
    "Define a memoized single-valued unary procedure with docstring.
The procedure argument is compared to cached keys using `eqv?'."
    (syntax-case form ()
      ((_ (proc arg) docstring body body* ...)
       (string? (syntax->datum #'docstring))
       #'(define proc
           (let ((cache (make-hash-table)))
             (define (proc arg)
               docstring
               (match (hashv-get-handle cache arg)
                 ((_ . value)
                  value)
                 (_
                  (let ((result (let () body body* ...)))
                    (hashv-set! cache arg result)
                    result))))
             proc))))))

(define-memoized/v (package-transitive-supported-systems package)
  "Return the intersection of the systems supported by PACKAGE and those
supported by its dependencies."
  (fold (lambda (input systems)
          (match input
            ((label (? package? p) . _)
             (lset-intersection
              string=? systems (package-transitive-supported-systems p)))
            (_
             systems)))
        (package-supported-systems package)
        (package-direct-inputs package)))

(define (bag-transitive-inputs bag)
  "Same as 'package-transitive-inputs', but applied to a bag."
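A usage sketch of the new macro: it can memoize any unary procedure whose argument is meaningfully compared with 'eqv?'.  The procedure below is purely illustrative and not part of this change:

;; Illustrative only: memoize a cheap-per-package computation.
(define-memoized/v (package-input-count package)
  "Return the number of direct inputs of PACKAGE."
  (length (package-direct-inputs package)))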
(define (bag-transitive-inputs bag)
|
(define (bag-transitive-inputs bag)
|
||||||
"Same as 'package-transitive-inputs', but applied to a bag."
|
"Same as 'package-transitive-inputs', but applied to a bag."
|
||||||
|
|
|
@ -414,7 +414,13 @@ INFO-DIR? is #f."
|
||||||
(return #f))))
|
(return #f))))
|
||||||
(define inputs
|
(define inputs
|
||||||
(if info-dir
|
(if info-dir
|
||||||
(cons info-dir (manifest-inputs manifest))
|
;; XXX: Here we use the tuple (INFO-DIR "out") just so that the list
|
||||||
|
;; is unambiguous for the gexp code when MANIFEST has a single input
|
||||||
|
;; denoted as a string (the pattern (DRV STRING) is normally
|
||||||
|
;; interpreted in a gexp as "the STRING output of DRV".). See
|
||||||
|
;; <http://lists.gnu.org/archive/html/guix-devel/2014-12/msg00292.html>.
|
||||||
|
(cons (list info-dir "out")
|
||||||
|
(manifest-inputs manifest))
|
||||||
(manifest-inputs manifest)))
|
(manifest-inputs manifest)))
|
||||||
|
|
||||||
(define builder
|
(define builder
|
||||||
|
|
|
@@ -1 +0,0 @@
-Subproject commit e7720aa10a1da63bb15a4587837d649268944943
@@ -0,0 +1,2 @@
+Most of the code in this directory was written by several people for
+the Nix project (http://nixos.org/nix).  Thanks!
@ -0,0 +1,504 @@
[New file: the complete, unmodified text of the GNU Lesser General Public License, version 2.1, February 1999, Copyright (C) 1991, 1999 Free Software Foundation, Inc., including the closing instructions on how to apply the license to a new library.]
@ -0,0 +1,38 @@
//
//  boost/assert.hpp - BOOST_ASSERT(expr)
//
//  Copyright (c) 2001, 2002 Peter Dimov and Multi Media Ltd.
//
//  Permission to copy, use, modify, sell and distribute this software
//  is granted provided this copyright notice appears in all copies.
//  This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.
//
//  Note: There are no include guards. This is intentional.
//
//  See http://www.boost.org/libs/utility/assert.html for documentation.
//

#undef BOOST_ASSERT

#if defined(BOOST_DISABLE_ASSERTS)

# define BOOST_ASSERT(expr) ((void)0)

#elif defined(BOOST_ENABLE_ASSERT_HANDLER)

#include <boost/current_function.hpp>

namespace boost
{

void assertion_failed(char const * expr, char const * function, char const * file, long line); // user defined

} // namespace boost

#define BOOST_ASSERT(expr) ((expr)? ((void)0): ::boost::assertion_failed(#expr, BOOST_CURRENT_FUNCTION, __FILE__, __LINE__))

#else
# include <assert.h>
# define BOOST_ASSERT(expr) assert(expr)
#endif
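
The header above gives BOOST_ASSERT three compile-time behaviours: compiled out entirely (BOOST_DISABLE_ASSERTS), routed to a user-supplied boost::assertion_failed handler (BOOST_ENABLE_ASSERT_HANDLER), or forwarded to the standard assert. The following is only a minimal sketch of the handler-based mode; the handler body, the failing expression, and main() are illustrative and not part of this commit.

    // Sketch: BOOST_ENABLE_ASSERT_HANDLER must be defined before the include.
    #define BOOST_ENABLE_ASSERT_HANDLER
    #include <boost/assert.hpp>
    #include <cstdio>
    #include <cstdlib>

    namespace boost
    {
        // User-defined handler invoked when a BOOST_ASSERT condition is false.
        void assertion_failed(char const * expr, char const * function,
                              char const * file, long line)
        {
            std::fprintf(stderr, "assertion '%s' failed in %s (%s:%ld)\n",
                         expr, function, file, line);
            std::abort();
        }
    }

    int main()
    {
        int n = 0;                // illustrative value
        BOOST_ASSERT(n > 0);      // expands to a call to boost::assertion_failed
    }
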
@ -0,0 +1,64 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

// ideas taken from Rüdiger Loos's format class
// and Karl Nelson's ofstream

// ----------------------------------------------------------------------------
// format.hpp :  primary header
// ----------------------------------------------------------------------------

#ifndef BOOST_FORMAT_HPP
#define BOOST_FORMAT_HPP

#include <vector>
#include <string>
#include <sstream>
#include <cassert>

#if HAVE_LOCALE
#include <locale>
#else
#define BOOST_NO_STD_LOCALE
#define BOOST_NO_LOCALE_ISIDIGIT
#include <cctype>
#endif

#include <boost/format/macros_default.hpp>


// **** Forward declarations ----------------------------------
#include <boost/format/format_fwd.hpp>      // basic_format<Ch,Tr>, and other frontends
#include <boost/format/internals_fwd.hpp>   // misc forward declarations for internal use


// **** Auxiliary structs (stream_format_state<Ch,Tr> , and format_item<Ch,Tr> )
#include <boost/format/internals.hpp>

// **** Format class interface --------------------------------
#include <boost/format/format_class.hpp>

// **** Exceptions -----------------------------------------------
#include <boost/format/exceptions.hpp>

// **** Implementation -------------------------------------------
//#include <boost/format/format_implementation.hpp>   // member functions

#include <boost/format/group.hpp>            // class for grouping arguments

#include <boost/format/feed_args.hpp>        // argument-feeding functions
//#include <boost/format/parsing.hpp>          // format-string parsing (member-)functions

// **** Implementation of the free functions ----------------------
//#include <boost/format/free_funcs.hpp>


#endif // BOOST_FORMAT_HPP
@ -0,0 +1,96 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

// ideas taken from Rüdiger Loos's format class
// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)

// ------------------------------------------------------------------------------
// exceptions.hpp
// ------------------------------------------------------------------------------


#ifndef BOOST_FORMAT_EXCEPTIONS_HPP
#define BOOST_FORMAT_EXCEPTIONS_HPP


#include <stdexcept>


namespace boost {

namespace io {

// **** exceptions -----------------------------------------------

class format_error : public std::exception
{
public:
    format_error() {}
    virtual const char *what() const throw()
    {
        return "boost::format_error: "
            "format generic failure";
    }
};

class bad_format_string : public format_error
{
public:
    bad_format_string() {}
    virtual const char *what() const throw()
    {
        return "boost::bad_format_string: "
            "format-string is ill-formed";
    }
};

class too_few_args : public format_error
{
public:
    too_few_args() {}
    virtual const char *what() const throw()
    {
        return "boost::too_few_args: "
            "format-string refered to more arguments than were passed";
    }
};

class too_many_args : public format_error
{
public:
    too_many_args() {}
    virtual const char *what() const throw()
    {
        return "boost::too_many_args: "
            "format-string refered to less arguments than were passed";
    }
};


class out_of_range : public format_error
{
public:
    out_of_range() {}
    virtual const char *what() const throw()
    {
        return "boost::out_of_range: "
            "tried to refer to an argument (or item) number which is out of range, "
            "according to the format string.";
    }
};


} // namespace io

} // namespace boost


#endif // BOOST_FORMAT_EXCEPTIONS_HPP
@ -0,0 +1,247 @@
|
||||||
|
// -*- C++ -*-
|
||||||
|
// Boost general library 'format' ---------------------------
|
||||||
|
// See http://www.boost.org for updates, documentation, and revision history.
|
||||||
|
|
||||||
|
// (C) Samuel Krempp 2001
|
||||||
|
// krempp@crans.ens-cachan.fr
|
||||||
|
// Permission to copy, use, modify, sell and
|
||||||
|
// distribute this software is granted provided this copyright notice appears
|
||||||
|
// in all copies. This software is provided "as is" without express or implied
|
||||||
|
// warranty, and with no claim as to its suitability for any purpose.
|
||||||
|
|
||||||
|
// ideas taken from Rüdiger Loos's format class
|
||||||
|
// and Karl Nelson's ofstream
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// feed_args.hpp : functions for processing each argument
|
||||||
|
// (feed, feed_manip, and distribute)
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef BOOST_FORMAT_FEED_ARGS_HPP
|
||||||
|
#define BOOST_FORMAT_FEED_ARGS_HPP
|
||||||
|
|
||||||
|
#include "boost/format/format_class.hpp"
|
||||||
|
#include "boost/format/group.hpp"
|
||||||
|
|
||||||
|
#include "boost/throw_exception.hpp"
|
||||||
|
|
||||||
|
namespace boost {
|
||||||
|
namespace io {
|
||||||
|
namespace detail {
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
inline
|
||||||
|
void empty_buf(BOOST_IO_STD ostringstream & os) {
|
||||||
|
static const std::string emptyStr;
|
||||||
|
os.str(emptyStr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void do_pad( std::string & s,
|
||||||
|
std::streamsize w,
|
||||||
|
const char c,
|
||||||
|
std::ios::fmtflags f,
|
||||||
|
bool center)
|
||||||
|
// applies centered / left / right padding to the string s.
|
||||||
|
// Effects : string s is padded.
|
||||||
|
{
|
||||||
|
std::streamsize n=w-s.size();
|
||||||
|
if(n<=0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if(center)
|
||||||
|
{
|
||||||
|
s.reserve(w); // allocate once for the 2 inserts
|
||||||
|
const std::streamsize n1 = n /2, n0 = n - n1;
|
||||||
|
s.insert(s.begin(), n0, c);
|
||||||
|
s.append(n1, c);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if(f & std::ios::left) {
|
||||||
|
s.append(n, c);
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
s.insert(s.begin(), n, c);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // -do_pad(..)
|
||||||
|
|
||||||
|
|
||||||
|
template<class T> inline
|
||||||
|
void put_head(BOOST_IO_STD ostream& , const T& ) {
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T> inline
|
||||||
|
void put_head( BOOST_IO_STD ostream& os, const group1<T>& x ) {
|
||||||
|
os << group_head(x.a1_); // send the first N-1 items, not the last
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T> inline
|
||||||
|
void put_last( BOOST_IO_STD ostream& os, const T& x ) {
|
||||||
|
os << x ;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T> inline
|
||||||
|
void put_last( BOOST_IO_STD ostream& os, const group1<T>& x ) {
|
||||||
|
os << group_last(x.a1_); // this selects the last element
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
|
||||||
|
template<class T> inline
|
||||||
|
void put_head( BOOST_IO_STD ostream& , T& ) {
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T> inline
|
||||||
|
void put_last( BOOST_IO_STD ostream& os, T& x ) {
|
||||||
|
os << x ;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
void put( T x,
|
||||||
|
const format_item& specs,
|
||||||
|
std::string & res,
|
||||||
|
BOOST_IO_STD ostringstream& oss_ )
|
||||||
|
{
|
||||||
|
// does the actual conversion of x, with given params, into a string
|
||||||
|
// using the *supplied* strinstream. (the stream state is important)
|
||||||
|
|
||||||
|
typedef std::string string_t;
|
||||||
|
typedef format_item format_item_t;
|
||||||
|
|
||||||
|
stream_format_state prev_state(oss_);
|
||||||
|
|
||||||
|
specs.state_.apply_on(oss_);
|
||||||
|
|
||||||
|
// in case x is a group, apply the manip part of it,
|
||||||
|
// in order to find width
|
||||||
|
put_head( oss_, x );
|
||||||
|
empty_buf( oss_);
|
||||||
|
|
||||||
|
const std::streamsize w=oss_.width();
|
||||||
|
const std::ios::fmtflags fl=oss_.flags();
|
||||||
|
const bool internal = (fl & std::ios::internal) != 0;
|
||||||
|
const bool two_stepped_padding = internal
|
||||||
|
&& ! ( specs.pad_scheme_ & format_item_t::spacepad )
|
||||||
|
&& specs.truncate_ < 0 ;
|
||||||
|
|
||||||
|
|
||||||
|
if(! two_stepped_padding)
|
||||||
|
{
|
||||||
|
if(w>0) // handle simple padding via do_pad, not natively in stream
|
||||||
|
oss_.width(0);
|
||||||
|
put_last( oss_, x);
|
||||||
|
res = oss_.str();
|
||||||
|
|
||||||
|
if (specs.truncate_ >= 0)
|
||||||
|
res.erase(specs.truncate_);
|
||||||
|
|
||||||
|
// complex pads :
|
||||||
|
if(specs.pad_scheme_ & format_item_t::spacepad)
|
||||||
|
{
|
||||||
|
if( res.size()==0 || ( res[0]!='+' && res[0]!='-' ))
|
||||||
|
{
|
||||||
|
res.insert(res.begin(), 1, ' '); // insert 1 space at pos 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if(w > 0) // need do_pad
|
||||||
|
{
|
||||||
|
do_pad(res,w,oss_.fill(), fl, (specs.pad_scheme_ & format_item_t::centered) !=0 );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else // 2-stepped padding
|
||||||
|
{
|
||||||
|
put_last( oss_, x); // oss_.width() may result in padding.
|
||||||
|
res = oss_.str();
|
||||||
|
|
||||||
|
if (specs.truncate_ >= 0)
|
||||||
|
res.erase(specs.truncate_);
|
||||||
|
|
||||||
|
if( res.size() - w > 0)
|
||||||
|
{ // length w exceeded
|
||||||
|
// either it was multi-output with first output padding up all width..
|
||||||
|
// either it was one big arg and we are fine.
|
||||||
|
empty_buf( oss_);
|
||||||
|
oss_.width(0);
|
||||||
|
put_last(oss_, x );
|
||||||
|
string_t tmp = oss_.str(); // minimal-length output
|
||||||
|
std::streamsize d;
|
||||||
|
if( (d=w - tmp.size()) <=0 )
|
||||||
|
{
|
||||||
|
// minimal length is already >= w, so no padding (cool!)
|
||||||
|
res.swap(tmp);
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{ // hum.. we need to pad (it was necessarily multi-output)
|
||||||
|
typedef typename string_t::size_type size_type;
|
||||||
|
size_type i = 0;
|
||||||
|
while( i<tmp.size() && tmp[i] == res[i] ) // find where we should pad.
|
||||||
|
++i;
|
||||||
|
tmp.insert(i, static_cast<size_type>( d ), oss_.fill());
|
||||||
|
res.swap( tmp );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{ // okay, only one thing was printed and padded, so res is fine.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
prev_state.apply_on(oss_);
|
||||||
|
empty_buf( oss_);
|
||||||
|
oss_.clear();
|
||||||
|
} // end- put(..)
|
||||||
|
|
||||||
|
|
||||||
|
} // local namespace
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
void distribute(basic_format& self, T x)
|
||||||
|
// call put(x, ..) on every occurence of the current argument :
|
||||||
|
{
|
||||||
|
if(self.cur_arg_ >= self.num_args_)
|
||||||
|
{
|
||||||
|
if( self.exceptions() & too_many_args_bit )
|
||||||
|
boost::throw_exception(too_many_args()); // too many variables have been supplied !
|
||||||
|
else return;
|
||||||
|
}
|
||||||
|
for(unsigned long i=0; i < self.items_.size(); ++i)
|
||||||
|
{
|
||||||
|
if(self.items_[i].argN_ == self.cur_arg_)
|
||||||
|
{
|
||||||
|
put<T> (x, self.items_[i], self.items_[i].res_, self.oss_ );
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
basic_format& feed(basic_format& self, T x)
|
||||||
|
{
|
||||||
|
if(self.dumped_) self.clear();
|
||||||
|
distribute<T> (self, x);
|
||||||
|
++self.cur_arg_;
|
||||||
|
if(self.bound_.size() != 0)
|
||||||
|
{
|
||||||
|
while( self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_] )
|
||||||
|
++self.cur_arg_;
|
||||||
|
}
|
||||||
|
|
||||||
|
// this arg is finished, reset the stream's format state
|
||||||
|
self.state0_.apply_on(self.oss_);
|
||||||
|
return self;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
} // namespace detail
|
||||||
|
} // namespace io
|
||||||
|
} // namespace boost
|
||||||
|
|
||||||
|
|
||||||
|
#endif // BOOST_FORMAT_FEED_ARGS_HPP
|
|
@ -0,0 +1,135 @@
|
||||||
|
// -*- C++ -*-
|
||||||
|
// Boost general library 'format' ---------------------------
|
||||||
|
// See http://www.boost.org for updates, documentation, and revision history.
|
||||||
|
|
||||||
|
// (C) Samuel Krempp 2001
|
||||||
|
// krempp@crans.ens-cachan.fr
|
||||||
|
// Permission to copy, use, modify, sell and
|
||||||
|
// distribute this software is granted provided this copyright notice appears
|
||||||
|
// in all copies. This software is provided "as is" without express or implied
|
||||||
|
// warranty, and with no claim as to its suitability for any purpose.
|
||||||
|
|
||||||
|
// ideas taken from Rüdiger Loos's format class
|
||||||
|
// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------------
|
||||||
|
// format_class.hpp : class interface
|
||||||
|
// ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef BOOST_FORMAT_CLASS_HPP
|
||||||
|
#define BOOST_FORMAT_CLASS_HPP
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include <boost/format/format_fwd.hpp>
|
||||||
|
#include <boost/format/internals_fwd.hpp>
|
||||||
|
|
||||||
|
#include <boost/format/internals.hpp>
|
||||||
|
|
||||||
|
namespace boost {
|
||||||
|
|
||||||
|
class basic_format
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
typedef std::string string_t;
|
||||||
|
typedef BOOST_IO_STD ostringstream internal_stream_t;
|
||||||
|
private:
|
||||||
|
typedef BOOST_IO_STD ostream stream_t;
|
||||||
|
typedef io::detail::stream_format_state stream_format_state;
|
||||||
|
typedef io::detail::format_item format_item_t;
|
||||||
|
|
||||||
|
public:
|
||||||
|
basic_format(const char* str);
|
||||||
|
basic_format(const string_t& s);
|
||||||
|
#ifndef BOOST_NO_STD_LOCALE
|
||||||
|
basic_format(const char* str, const std::locale & loc);
|
||||||
|
basic_format(const string_t& s, const std::locale & loc);
|
||||||
|
#endif // no locale
|
||||||
|
basic_format(const basic_format& x);
|
||||||
|
basic_format& operator= (const basic_format& x);
|
||||||
|
|
||||||
|
basic_format& clear(); // empty the string buffers (except bound arguments, see clear_binds() )
|
||||||
|
|
||||||
|
// pass arguments through those operators :
|
||||||
|
template<class T> basic_format& operator%(const T& x)
|
||||||
|
{
|
||||||
|
return io::detail::feed<const T&>(*this,x);
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST
|
||||||
|
template<class T> basic_format& operator%(T& x)
|
||||||
|
{
|
||||||
|
return io::detail::feed<T&>(*this,x);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
// system for binding arguments :
|
||||||
|
template<class T>
|
||||||
|
basic_format& bind_arg(int argN, const T& val)
|
||||||
|
{
|
||||||
|
return io::detail::bind_arg_body(*this, argN, val);
|
||||||
|
}
|
||||||
|
basic_format& clear_bind(int argN);
|
||||||
|
basic_format& clear_binds();
|
||||||
|
|
||||||
|
// modify the params of a directive, by applying a manipulator :
|
||||||
|
template<class T>
|
||||||
|
basic_format& modify_item(int itemN, const T& manipulator)
|
||||||
|
{
|
||||||
|
return io::detail::modify_item_body(*this, itemN, manipulator) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Choosing which errors will throw exceptions :
|
||||||
|
unsigned char exceptions() const;
|
||||||
|
unsigned char exceptions(unsigned char newexcept);
|
||||||
|
|
||||||
|
// final output
|
||||||
|
string_t str() const;
|
||||||
|
friend BOOST_IO_STD ostream&
|
||||||
|
operator<< ( BOOST_IO_STD ostream& , const basic_format& );
|
||||||
|
|
||||||
|
|
||||||
|
template<class T> friend basic_format&
|
||||||
|
io::detail::feed(basic_format&, T);
|
||||||
|
|
||||||
|
template<class T> friend
|
||||||
|
void io::detail::distribute(basic_format&, T);
|
||||||
|
|
||||||
|
template<class T> friend
|
||||||
|
basic_format& io::detail::modify_item_body(basic_format&, int, const T&);
|
||||||
|
|
||||||
|
template<class T> friend
|
||||||
|
basic_format& io::detail::bind_arg_body(basic_format&, int, const T&);
|
||||||
|
|
||||||
|
// make the members private only if the friend templates are supported
|
||||||
|
private:
|
||||||
|
|
||||||
|
// flag bits, used for style_
|
||||||
|
enum style_values { ordered = 1, // set only if all directives are positional directives
|
||||||
|
special_needs = 4 };
|
||||||
|
|
||||||
|
// parse the format string :
|
||||||
|
void parse(const string_t&);
|
||||||
|
|
||||||
|
int style_; // style of format-string : positional or not, etc
|
||||||
|
int cur_arg_; // keep track of wich argument will come
|
||||||
|
int num_args_; // number of expected arguments
|
||||||
|
mutable bool dumped_; // true only after call to str() or <<
|
||||||
|
std::vector<format_item_t> items_; // vector of directives (aka items)
|
||||||
|
string_t prefix_; // piece of string to insert before first item
|
||||||
|
|
||||||
|
std::vector<bool> bound_; // stores which arguments were bound
|
||||||
|
// size = num_args OR zero
|
||||||
|
internal_stream_t oss_; // the internal stream.
|
||||||
|
stream_format_state state0_; // reference state for oss_
|
||||||
|
unsigned char exceptions_;
|
||||||
|
}; // class basic_format
|
||||||
|
|
||||||
|
|
||||||
|
} // namespace boost
|
||||||
|
|
||||||
|
|
||||||
|
#endif // BOOST_FORMAT_CLASS_HPP
|
|
@ -0,0 +1,49 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

// ideas taken from Rüdiger Loos's format class
// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)

// ------------------------------------------------------------------------------
// format_fwd.hpp :  forward declarations, for primary header format.hpp
// ------------------------------------------------------------------------------

#ifndef BOOST_FORMAT_FWD_HPP
#define BOOST_FORMAT_FWD_HPP

#include <string>
#include <iosfwd>

namespace boost {

class basic_format;

typedef basic_format format;

namespace io {
enum format_error_bits { bad_format_string_bit = 1,
                         too_few_args_bit = 2, too_many_args_bit = 4,
                         out_of_range_bit = 8,
                         all_error_bits = 255, no_error_bits=0 };

// Convertion: format to string
std::string str(const basic_format& ) ;

} // namespace io


BOOST_IO_STD ostream&
operator<<( BOOST_IO_STD ostream&, const basic_format&);


} // namespace boost

#endif // BOOST_FORMAT_FWD_HPP
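
The forward header above exposes the public surface that the other bundled headers fill in: the basic_format class (typedef'd to format), the io::str free function, the stream inserter, and the error bits matched by the exception classes added earlier. A minimal usage sketch follows, assuming the bundled headers build as in upstream Boost; the format strings and values are illustrative only and do not appear in this commit.

    #include <boost/format.hpp>
    #include <iostream>
    #include <string>

    int main()
    {
        // Positional directives are fed with operator% in argument order.
        boost::format fmt("%1% owes %2% euros");
        fmt % "Alice" % 42;
        std::string s = fmt.str();        // "Alice owes 42 euros"
        std::cout << s << '\n';

        // Calling str() before all arguments are fed raises io::too_few_args
        // (declared in the exceptions header above) unless that error bit
        // has been cleared via exceptions().
        try {
            boost::format incomplete("%1% and %2%");
            incomplete % "only one";
            std::cout << incomplete.str() << '\n';
        } catch (const boost::io::too_few_args& e) {
            std::cerr << e.what() << '\n';
        }
    }
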
@ -0,0 +1,256 @@
|
||||||
|
// -*- C++ -*-
|
||||||
|
// Boost general library format ---------------------------
|
||||||
|
// See http://www.boost.org for updates, documentation, and revision history.
|
||||||
|
|
||||||
|
// (C) Samuel Krempp 2001
|
||||||
|
// krempp@crans.ens-cachan.fr
|
||||||
|
// Permission to copy, use, modify, sell and
|
||||||
|
// distribute this software is granted provided this copyright notice appears
|
||||||
|
// in all copies. This software is provided "as is" without express or implied
|
||||||
|
// warranty, and with no claim as to its suitability for any purpose.
|
||||||
|
|
||||||
|
// ideas taken from Rüdiger Loos's format class
|
||||||
|
// and Karl Nelson's ofstream
|
||||||
|
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
// format_implementation.hpp Implementation of the basic_format class
|
||||||
|
// ----------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef BOOST_FORMAT_IMPLEMENTATION_HPP
|
||||||
|
#define BOOST_FORMAT_IMPLEMENTATION_HPP
|
||||||
|
|
||||||
|
#include <boost/throw_exception.hpp>
|
||||||
|
#include <boost/assert.hpp>
|
||||||
|
#include <boost/format.hpp>
|
||||||
|
|
||||||
|
namespace boost {
|
||||||
|
|
||||||
|
// -------- format:: -------------------------------------------
|
||||||
|
basic_format::basic_format(const char* str)
|
||||||
|
    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
      items_(), oss_(), exceptions_(io::all_error_bits)
{
    state0_.set_by_stream(oss_);
    string_t emptyStr;
    if( !str) str = emptyStr.c_str();
    parse( str );
}

#ifndef BOOST_NO_STD_LOCALE
basic_format::basic_format(const char* str, const std::locale & loc)
    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
      items_(), oss_(), exceptions_(io::all_error_bits)
{
    oss_.imbue( loc );
    state0_.set_by_stream(oss_);
    string_t emptyStr;
    if( !str) str = emptyStr.c_str();
    parse( str );
}

basic_format::basic_format(const string_t& s, const std::locale & loc)
    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
      items_(), oss_(), exceptions_(io::all_error_bits)
{
    oss_.imbue( loc );
    state0_.set_by_stream(oss_);
    parse(s);
}
#endif //BOOST_NO_STD_LOCALE

basic_format::basic_format(const string_t& s)
    : style_(0), cur_arg_(0), num_args_(0), dumped_(false),
      items_(), oss_(), exceptions_(io::all_error_bits)
{
    state0_.set_by_stream(oss_);
    parse(s);
}

basic_format::basic_format(const basic_format& x)
    : style_(x.style_), cur_arg_(x.cur_arg_), num_args_(x.num_args_), dumped_(false),
      items_(x.items_), prefix_(x.prefix_), bound_(x.bound_),
      oss_(), // <- we obviously can't copy x.oss_
      state0_(x.state0_), exceptions_(x.exceptions_)
{
    state0_.apply_on(oss_);
}

basic_format& basic_format::operator= (const basic_format& x)
{
    if(this == &x)
        return *this;
    state0_ = x.state0_;
    state0_.apply_on(oss_);

    // plus all the other (trivial) assignments :
    exceptions_ = x.exceptions_;
    items_ = x.items_;
    prefix_ = x.prefix_;
    bound_=x.bound_;
    style_=x.style_;
    cur_arg_=x.cur_arg_;
    num_args_=x.num_args_;
    dumped_=x.dumped_;
    return *this;
}


unsigned char basic_format::exceptions() const
{
    return exceptions_;
}

unsigned char basic_format::exceptions(unsigned char newexcept)
{
    unsigned char swp = exceptions_;
    exceptions_ = newexcept;
    return swp;
}


basic_format& basic_format::clear()
// empty the string buffers (except bound arguments, see clear_binds() )
// and make the format object ready for formatting a new set of arguments
{
    BOOST_ASSERT( bound_.size()==0 || num_args_ == static_cast<int>(bound_.size()) );

    for(unsigned long i=0; i<items_.size(); ++i){
        items_[i].state_ = items_[i].ref_state_;
        // clear converted strings only if the corresponding argument is not bound :
        if( bound_.size()==0 || !bound_[ items_[i].argN_ ] ) items_[i].res_.resize(0);
    }
    cur_arg_=0; dumped_=false;
    // maybe first arg is bound:
    if(bound_.size() != 0)
    {
        while(cur_arg_ < num_args_ && bound_[cur_arg_] ) ++cur_arg_;
    }
    return *this;
}

basic_format& basic_format::clear_binds()
// cancel all bindings, and clear()
{
    bound_.resize(0);
    clear();
    return *this;
}

basic_format& basic_format::clear_bind(int argN)
// cancel the binding of ONE argument, and clear()
{
    if(argN<1 || argN > num_args_ || bound_.size()==0 || !bound_[argN-1] )
    {
        if( exceptions() & io::out_of_range_bit )
            boost::throw_exception(io::out_of_range()); // arg not in range.
        else return *this;
    }
    bound_[argN-1]=false;
    clear();
    return *this;
}


std::string basic_format::str() const
{
    dumped_=true;
    if(items_.size()==0)
        return prefix_;
    if( cur_arg_ < num_args_)
        if( exceptions() & io::too_few_args_bit )
            boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !

    unsigned long sz = prefix_.size();
    unsigned long i;
    for(i=0; i < items_.size(); ++i)
        sz += items_[i].res_.size() + items_[i].appendix_.size();
    string_t res;
    res.reserve(sz);

    res += prefix_;
    for(i=0; i < items_.size(); ++i)
    {
        const format_item_t& item = items_[i];
        res += item.res_;
        if( item.argN_ == format_item_t::argN_tabulation)
        {
            BOOST_ASSERT( item.pad_scheme_ & format_item_t::tabulation);
            std::streamsize n = item.state_.width_ - res.size();
            if( n > 0 )
                res.append( n, item.state_.fill_ );
        }
        res += item.appendix_;
    }
    return res;
}

namespace io {
namespace detail {

template<class T>
basic_format& bind_arg_body( basic_format& self,
                             int argN,
                             const T& val)
// bind one argument to a fixed value
// this is persistent over clear() calls, thus also over str() and <<
{
    if(self.dumped_) self.clear(); // needed, because we will modify cur_arg_..
    if(argN<1 || argN > self.num_args_)
    {
        if( self.exceptions() & io::out_of_range_bit )
            boost::throw_exception(io::out_of_range()); // arg not in range.
        else return self;
    }
    if(self.bound_.size()==0)
        self.bound_.assign(self.num_args_,false);
    else
        BOOST_ASSERT( self.num_args_ == static_cast<signed int>(self.bound_.size()) );
    int o_cur_arg = self.cur_arg_;
    self.cur_arg_ = argN-1; // arrays begin at 0

    self.bound_[self.cur_arg_]=false; // if already set, we unset and re-sets..
    self.operator%(val); // put val at the right place, because cur_arg is set

    // Now re-position cur_arg before leaving :
    self.cur_arg_ = o_cur_arg;
    self.bound_[argN-1]=true;
    if(self.cur_arg_ == argN-1 )
    // hum, now this arg is bound, so move to next free arg
    {
        while(self.cur_arg_ < self.num_args_ && self.bound_[self.cur_arg_]) ++self.cur_arg_;
    }
    // In any case, we either have all args, or are on a non-binded arg :
    BOOST_ASSERT( self.cur_arg_ >= self.num_args_ || ! self.bound_[self.cur_arg_]);
    return self;
}

template<class T>
basic_format& modify_item_body( basic_format& self,
                                int itemN,
                                const T& manipulator)
// applies a manipulator to the format_item describing a given directive.
// this is a permanent change, clear or clear_binds won't cancel that.
{
    if(itemN<1 || itemN >= static_cast<signed int>(self.items_.size() ))
    {
        if( self.exceptions() & io::out_of_range_bit )
            boost::throw_exception(io::out_of_range()); // item not in range.
        else return self;
    }
    self.items_[itemN-1].ref_state_.apply_manip( manipulator );
    self.items_[itemN-1].state_ = self.items_[itemN-1].ref_state_;
    return self;
}

} // namespace detail

} // namespace io

} // namespace boost


#endif // BOOST_FORMAT_IMPLEMENTATION_HPP
@ -0,0 +1,71 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

//  ideas taken from Rüdiger Loos's format class
//  and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)

// ------------------------------------------------------------------------------
// free_funcs.hpp :  implementation of the free functions declared in namespace format
// ------------------------------------------------------------------------------

#ifndef BOOST_FORMAT_FUNCS_HPP
#define BOOST_FORMAT_FUNCS_HPP

#include "boost/format.hpp"
#include "boost/throw_exception.hpp"

namespace boost {

namespace io {
    inline
    std::string str(const basic_format& f)
    // adds up all pieces of strings and converted items, and return the formatted string
    {
        return f.str();
    }
} // - namespace io

BOOST_IO_STD ostream&
operator<<( BOOST_IO_STD ostream& os,
            const boost::basic_format& f)
// effect: "return os << str(f);" but we can try to do it faster
{
    typedef boost::basic_format format_t;
    if(f.items_.size()==0)
        os << f.prefix_;
    else {
        if(f.cur_arg_ < f.num_args_)
            if( f.exceptions() & io::too_few_args_bit )
                boost::throw_exception(io::too_few_args()); // not enough variables have been supplied !
        if(f.style_ & format_t::special_needs)
            os << f.str();
        else {
            // else we dont have to count chars output, so we dump directly to os :
            os << f.prefix_;
            for(unsigned long i=0; i<f.items_.size(); ++i)
            {
                const format_t::format_item_t& item = f.items_[i];
                os << item.res_;
                os << item.appendix_;
            }
        }
    }
    f.dumped_=true;
    return os;
}

} // namespace boost

#endif // BOOST_FORMAT_FUNCS_HPP
@ -0,0 +1,680 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

//  ideas taken from Rüdiger Loos's format class
//  and Karl Nelson's ofstream

// ----------------------------------------------------------------------------

// group.hpp :  encapsulates a group of manipulators along with an argument
//
// group_head : cut the last element of a group out.
// (is overloaded below on each type of group)

// group_last : returns the last element of a group
// (is overloaded below on each type of group)

// ----------------------------------------------------------------------------

#ifndef BOOST_FORMAT_GROUP_HPP
#define BOOST_FORMAT_GROUP_HPP


namespace boost {
namespace io {

namespace detail {

// empty group, but useful even though.
struct group0
{
    group0() {}
};

template <class Ch, class Tr>
inline
BOOST_IO_STD ostream&
operator << ( BOOST_IO_STD ostream& os,
              const group0& )
{
    return os;
}

template <class T1>
struct group1
{
    T1 a1_;
    group1(T1 a1)
      : a1_(a1)
      {}
};

template <class Ch, class Tr, class T1>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group1<T1>& x)
{
    os << x.a1_;
    return os;
}


template <class T1,class T2>
struct group2
{
    T1 a1_;
    T2 a2_;
    group2(T1 a1,T2 a2)
      : a1_(a1),a2_(a2)
      {}
};

template <class Ch, class Tr, class T1,class T2>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group2<T1,T2>& x)
{
    os << x.a1_<< x.a2_;
    return os;
}

template <class T1,class T2,class T3>
struct group3
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    group3(T1 a1,T2 a2,T3 a3)
      : a1_(a1),a2_(a2),a3_(a3)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group3<T1,T2,T3>& x)
{
    os << x.a1_<< x.a2_<< x.a3_;
    return os;
}

template <class T1,class T2,class T3,class T4>
struct group4
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    group4(T1 a1,T2 a2,T3 a3,T4 a4)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group4<T1,T2,T3,T4>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5>
struct group5
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    group5(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group5<T1,T2,T3,T4,T5>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5,class T6>
struct group6
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    T6 a6_;
    group6(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group6<T1,T2,T3,T4,T5,T6>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
struct group7
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    T6 a6_;
    T7 a7_;
    group7(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group7<T1,T2,T3,T4,T5,T6,T7>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
struct group8
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    T6 a6_;
    T7 a7_;
    T8 a8_;
    group8(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group8<T1,T2,T3,T4,T5,T6,T7,T8>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
struct group9
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    T6 a6_;
    T7 a7_;
    T8 a8_;
    T9 a9_;
    group9(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_;
    return os;
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
struct group10
{
    T1 a1_;
    T2 a2_;
    T3 a3_;
    T4 a4_;
    T5 a5_;
    T6 a6_;
    T7 a7_;
    T8 a8_;
    T9 a9_;
    T10 a10_;
    group10(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9,T10 a10)
      : a1_(a1),a2_(a2),a3_(a3),a4_(a4),a5_(a5),a6_(a6),a7_(a7),a8_(a8),a9_(a9),a10_(a10)
      {}
};

template <class Ch, class Tr, class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
inline
BOOST_IO_STD ostream&
operator << (BOOST_IO_STD ostream& os,
             const group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10>& x)
{
    os << x.a1_<< x.a2_<< x.a3_<< x.a4_<< x.a5_<< x.a6_<< x.a7_<< x.a8_<< x.a9_<< x.a10_;
    return os;
}


template <class T1,class T2>
inline
group1<T1>
group_head( group2<T1,T2> const& x)
{
    return group1<T1> (x.a1_);
}

template <class T1,class T2>
inline
group1<T2>
group_last( group2<T1,T2> const& x)
{
    return group1<T2> (x.a2_);
}


template <class T1,class T2,class T3>
inline
group2<T1,T2>
group_head( group3<T1,T2,T3> const& x)
{
    return group2<T1,T2> (x.a1_,x.a2_);
}

template <class T1,class T2,class T3>
inline
group1<T3>
group_last( group3<T1,T2,T3> const& x)
{
    return group1<T3> (x.a3_);
}


template <class T1,class T2,class T3,class T4>
inline
group3<T1,T2,T3>
group_head( group4<T1,T2,T3,T4> const& x)
{
    return group3<T1,T2,T3> (x.a1_,x.a2_,x.a3_);
}

template <class T1,class T2,class T3,class T4>
inline
group1<T4>
group_last( group4<T1,T2,T3,T4> const& x)
{
    return group1<T4> (x.a4_);
}


template <class T1,class T2,class T3,class T4,class T5>
inline
group4<T1,T2,T3,T4>
group_head( group5<T1,T2,T3,T4,T5> const& x)
{
    return group4<T1,T2,T3,T4> (x.a1_,x.a2_,x.a3_,x.a4_);
}

template <class T1,class T2,class T3,class T4,class T5>
inline
group1<T5>
group_last( group5<T1,T2,T3,T4,T5> const& x)
{
    return group1<T5> (x.a5_);
}


template <class T1,class T2,class T3,class T4,class T5,class T6>
inline
group5<T1,T2,T3,T4,T5>
group_head( group6<T1,T2,T3,T4,T5,T6> const& x)
{
    return group5<T1,T2,T3,T4,T5> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_);
}

template <class T1,class T2,class T3,class T4,class T5,class T6>
inline
group1<T6>
group_last( group6<T1,T2,T3,T4,T5,T6> const& x)
{
    return group1<T6> (x.a6_);
}


template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
inline
group6<T1,T2,T3,T4,T5,T6>
group_head( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
{
    return group6<T1,T2,T3,T4,T5,T6> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_);
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7>
inline
group1<T7>
group_last( group7<T1,T2,T3,T4,T5,T6,T7> const& x)
{
    return group1<T7> (x.a7_);
}


template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
inline
group7<T1,T2,T3,T4,T5,T6,T7>
group_head( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
{
    return group7<T1,T2,T3,T4,T5,T6,T7> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_);
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8>
inline
group1<T8>
group_last( group8<T1,T2,T3,T4,T5,T6,T7,T8> const& x)
{
    return group1<T8> (x.a8_);
}


template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
inline
group8<T1,T2,T3,T4,T5,T6,T7,T8>
group_head( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
{
    return group8<T1,T2,T3,T4,T5,T6,T7,T8> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_);
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9>
inline
group1<T9>
group_last( group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> const& x)
{
    return group1<T9> (x.a9_);
}


template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
inline
group9<T1,T2,T3,T4,T5,T6,T7,T8,T9>
group_head( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
{
    return group9<T1,T2,T3,T4,T5,T6,T7,T8,T9> (x.a1_,x.a2_,x.a3_,x.a4_,x.a5_,x.a6_,x.a7_,x.a8_,x.a9_);
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9,class T10>
inline
group1<T10>
group_last( group10<T1,T2,T3,T4,T5,T6,T7,T8,T9,T10> const& x)
{
    return group1<T10> (x.a10_);
}


} // namespace detail


// helper functions

inline detail::group1< detail::group0 >
group() { return detail::group1< detail::group0 > ( detail::group0() ); }

template <class T1, class Var>
inline
detail::group1< detail::group2<T1, Var const&> >
group(T1 a1, Var const& var)
{
    return detail::group1< detail::group2<T1, Var const&> >
                   ( detail::group2<T1, Var const&>
                         (a1, var)
                   );
}

template <class T1,class T2, class Var>
inline
detail::group1< detail::group3<T1,T2, Var const&> >
group(T1 a1,T2 a2, Var const& var)
{
    return detail::group1< detail::group3<T1,T2, Var const&> >
                   ( detail::group3<T1,T2, Var const&>
                         (a1,a2, var)
                   );
}

template <class T1,class T2,class T3, class Var>
inline
detail::group1< detail::group4<T1,T2,T3, Var const&> >
group(T1 a1,T2 a2,T3 a3, Var const& var)
{
    return detail::group1< detail::group4<T1,T2,T3, Var const&> >
                   ( detail::group4<T1,T2,T3, Var const&>
                         (a1,a2,a3, var)
                   );
}

template <class T1,class T2,class T3,class T4, class Var>
inline
detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4, Var const& var)
{
    return detail::group1< detail::group5<T1,T2,T3,T4, Var const&> >
                   ( detail::group5<T1,T2,T3,T4, Var const&>
                         (a1,a2,a3,a4, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5, class Var>
inline
detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var const& var)
{
    return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var const&> >
                   ( detail::group6<T1,T2,T3,T4,T5, Var const&>
                         (a1,a2,a3,a4,a5, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6, class Var>
inline
detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var const& var)
{
    return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var const&> >
                   ( detail::group7<T1,T2,T3,T4,T5,T6, Var const&>
                         (a1,a2,a3,a4,a5,a6, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var>
inline
detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var const& var)
{
    return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&> >
                   ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var const&>
                         (a1,a2,a3,a4,a5,a6,a7, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var>
inline
detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var const& var)
{
    return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&> >
                   ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var const&>
                         (a1,a2,a3,a4,a5,a6,a7,a8, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var>
inline
detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var const& var)
{
    return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&> >
                   ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var const&>
                         (a1,a2,a3,a4,a5,a6,a7,a8,a9, var)
                   );
}


#ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST

template <class T1, class Var>
inline
detail::group1< detail::group2<T1, Var&> >
group(T1 a1, Var& var)
{
    return detail::group1< detail::group2<T1, Var&> >
                   ( detail::group2<T1, Var&>
                         (a1, var)
                   );
}

template <class T1,class T2, class Var>
inline
detail::group1< detail::group3<T1,T2, Var&> >
group(T1 a1,T2 a2, Var& var)
{
    return detail::group1< detail::group3<T1,T2, Var&> >
                   ( detail::group3<T1,T2, Var&>
                         (a1,a2, var)
                   );
}

template <class T1,class T2,class T3, class Var>
inline
detail::group1< detail::group4<T1,T2,T3, Var&> >
group(T1 a1,T2 a2,T3 a3, Var& var)
{
    return detail::group1< detail::group4<T1,T2,T3, Var&> >
                   ( detail::group4<T1,T2,T3, Var&>
                         (a1,a2,a3, var)
                   );
}

template <class T1,class T2,class T3,class T4, class Var>
inline
detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4, Var& var)
{
    return detail::group1< detail::group5<T1,T2,T3,T4, Var&> >
                   ( detail::group5<T1,T2,T3,T4, Var&>
                         (a1,a2,a3,a4, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5, class Var>
inline
detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5, Var& var)
{
    return detail::group1< detail::group6<T1,T2,T3,T4,T5, Var&> >
                   ( detail::group6<T1,T2,T3,T4,T5, Var&>
                         (a1,a2,a3,a4,a5, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6, class Var>
inline
detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6, Var& var)
{
    return detail::group1< detail::group7<T1,T2,T3,T4,T5,T6, Var&> >
                   ( detail::group7<T1,T2,T3,T4,T5,T6, Var&>
                         (a1,a2,a3,a4,a5,a6, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7, class Var>
inline
detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7, Var& var)
{
    return detail::group1< detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&> >
                   ( detail::group8<T1,T2,T3,T4,T5,T6,T7, Var&>
                         (a1,a2,a3,a4,a5,a6,a7, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8, class Var>
inline
detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8, Var& var)
{
    return detail::group1< detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&> >
                   ( detail::group9<T1,T2,T3,T4,T5,T6,T7,T8, Var&>
                         (a1,a2,a3,a4,a5,a6,a7,a8, var)
                   );
}

template <class T1,class T2,class T3,class T4,class T5,class T6,class T7,class T8,class T9, class Var>
inline
detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
group(T1 a1,T2 a2,T3 a3,T4 a4,T5 a5,T6 a6,T7 a7,T8 a8,T9 a9, Var& var)
{
    return detail::group1< detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&> >
                   ( detail::group10<T1,T2,T3,T4,T5,T6,T7,T8,T9, Var&>
                         (a1,a2,a3,a4,a5,a6,a7,a8,a9, var)
                   );
}


#endif //end- #ifndef BOOST_NO_OVERLOAD_FOR_NON_CONST


} // namespace io
} // namespace boost


#endif // BOOST_FORMAT_GROUP_HPP
@ -0,0 +1,167 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

//  ideas taken from Rüdiger Loos's format class
//  and Karl Nelson's ofstream

// ----------------------------------------------------------------------------
// internals.hpp :  internal structs. included by format.hpp
//                  stream_format_state, and format_item
// ----------------------------------------------------------------------------


#ifndef BOOST_FORMAT_INTERNALS_HPP
#define BOOST_FORMAT_INTERNALS_HPP


#include <string>
#include <sstream>

namespace boost {
namespace io {
namespace detail {


// --------------
// set of params that define the format state of a stream

struct stream_format_state
{
    typedef std::ios   basic_ios;

    std::streamsize width_;
    std::streamsize precision_;
    char fill_;
    std::ios::fmtflags flags_;

    stream_format_state() : width_(-1), precision_(-1), fill_(0), flags_(std::ios::dec) {}
    stream_format_state(basic_ios& os) {set_by_stream(os); }

    void apply_on(basic_ios & os) const;             //- applies format_state to the stream
    template<class T> void apply_manip(T manipulator) //- modifies state by applying manipulator.
        { apply_manip_body<T>( *this, manipulator) ; }
    void reset();                                    //- sets to default state.
    void set_by_stream(const basic_ios& os);         //- sets to os's state.
};


// --------------
// format_item : stores all parameters that can be defined by directives in the format-string

struct format_item
{
    enum pad_values { zeropad = 1, spacepad =2, centered=4, tabulation = 8 };

    enum arg_values { argN_no_posit   = -1, // non-positional directive. argN will be set later.
                      argN_tabulation = -2, // tabulation directive. (no argument read)
                      argN_ignored    = -3  // ignored directive. (no argument read)
    };
    typedef BOOST_IO_STD ios            basic_ios;
    typedef detail::stream_format_state stream_format_state;
    typedef std::string                 string_t;
    typedef BOOST_IO_STD ostringstream  internal_stream_t;


    int         argN_;      //- argument number (starts at 0, eg : %1 => argN=0)
                            //  negative values are used for items that don't process
                            //  an argument
    string_t    res_;       //- result of the formatting of this item
    string_t    appendix_;  //- piece of string between this item and the next

    stream_format_state ref_state_; // set by parsing the format_string, is only affected by modify_item
    stream_format_state state_;     // always same as ref_state, _unless_ modified by manipulators 'group(..)'

    // non-stream format-state parameters
    signed int   truncate_;   //- is >=0 for directives like %.5s (take 5 chars from the string)
    unsigned int pad_scheme_; //- several possible padding schemes can mix. see pad_values

    format_item() : argN_(argN_no_posit), truncate_(-1), pad_scheme_(0) {}

    void compute_states(); // sets states according to truncate and pad_scheme.
};


// -----------------------------------------------------------
// Definitions
// -----------------------------------------------------------

// ---  stream_format_state:: -------------------------------------------
inline
void stream_format_state::apply_on(basic_ios & os) const
// set the state of this stream according to our params
{
    if(width_ != -1)
        os.width(width_);
    if(precision_ != -1)
        os.precision(precision_);
    if(fill_ != 0)
        os.fill(fill_);
    os.flags(flags_);
}

inline
void stream_format_state::set_by_stream(const basic_ios& os)
// set our params according to the state of this stream
{
    flags_ = os.flags();
    width_ = os.width();
    precision_ = os.precision();
    fill_ = os.fill();
}

template<class T> inline
void apply_manip_body( stream_format_state& self,
                       T manipulator)
// modify our params according to the manipulator
{
    BOOST_IO_STD stringstream ss;
    self.apply_on( ss );
    ss << manipulator;
    self.set_by_stream( ss );
}

inline
void stream_format_state::reset()
// set our params to standard's default state
{
    width_=-1; precision_=-1; fill_=0;
    flags_ = std::ios::dec;
}


// ---  format_items:: -------------------------------------------
inline
void format_item::compute_states()
// reflect pad_scheme_ on state_ and ref_state_
//   because some pad_schemes has complex consequences on several state params.
{
    if(pad_scheme_ & zeropad)
    {
        if(ref_state_.flags_ & std::ios::left)
        {
            pad_scheme_ = pad_scheme_ & (~zeropad); // ignore zeropad in left alignment
        }
        else
        {
            ref_state_.fill_='0';
            ref_state_.flags_ |= std::ios::internal;
        }
    }
    state_ = ref_state_;
}


} } }   // namespaces boost :: io :: detail


#endif // BOOST_FORMAT_INTERNALS_HPP
@ -0,0 +1,65 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

//  ideas taken from Rüdiger Loos's format class
//  and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)

// ------------------------------------------------------------------------------
// internals_fwd.hpp :  forward declarations, for internal headers
// ------------------------------------------------------------------------------

#ifndef BOOST_FORMAT_INTERNAL_FWD_HPP
#define BOOST_FORMAT_INTERNAL_FWD_HPP

#include "boost/format/format_fwd.hpp"


namespace boost {
namespace io {

namespace detail {
    struct stream_format_state;
    struct format_item;
}


namespace detail {

    // these functions were intended as methods,
    // but MSVC have problems with template member functions :

    // defined in format_implementation.hpp :
    template<class T>
    basic_format& modify_item_body( basic_format& self,
                                    int itemN, const T& manipulator);

    template<class T>
    basic_format& bind_arg_body( basic_format& self,
                                 int argN, const T& val);

    template<class T>
    void apply_manip_body( stream_format_state& self,
                           T manipulator);

    // argument feeding (defined in feed_args.hpp ) :
    template<class T>
    void distribute(basic_format& self, T x);

    template<class T>
    basic_format& feed(basic_format& self, T x);

} // namespace detail

} // namespace io
} // namespace boost


#endif // BOOST_FORMAT_INTERNAL_FWD_HPP
@ -0,0 +1,48 @@
// -*- C++ -*-
//  Boost general library 'format'   ---------------------------
//  See http://www.boost.org for updates, documentation, and revision history.

//  (C) Samuel Krempp 2001
//                  krempp@crans.ens-cachan.fr
//  Permission to copy, use, modify, sell and
//  distribute this software is granted provided this copyright notice appears
//  in all copies. This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.

//  ideas taken from Rüdiger Loos's format class
//  and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)

// ------------------------------------------------------------------------------
// macros_default.hpp : configuration for the format library
//                      provides default values for the stl workaround macros
// ------------------------------------------------------------------------------

#ifndef BOOST_FORMAT_MACROS_DEFAULT_HPP
#define BOOST_FORMAT_MACROS_DEFAULT_HPP

// *** This should go to "boost/config/suffix.hpp".

#ifndef BOOST_IO_STD
#  define BOOST_IO_STD std::
#endif

// **** Workaround for io streams, stlport and msvc.
#ifdef BOOST_IO_NEEDS_USING_DECLARATION
namespace boost {
  using std::char_traits;
  using std::basic_ostream;
  using std::basic_ostringstream;
  namespace io {
    using std::basic_ostream;
    namespace detail {
      using std::basic_ios;
      using std::basic_ostream;
      using std::basic_ostringstream;
    }
  }
}
#endif

// ------------------------------------------------------------------------------

#endif // BOOST_FORMAT_MACROS_DEFAULT_HPP
@ -0,0 +1,454 @@
|
||||||
|
// -*- C++ -*-
|
||||||
|
// Boost general library 'format' ---------------------------
|
||||||
|
// See http://www.boost.org for updates, documentation, and revision history.
|
||||||
|
|
||||||
|
// (C) Samuel Krempp 2001
|
||||||
|
// krempp@crans.ens-cachan.fr
|
||||||
|
// Permission to copy, use, modify, sell and
|
||||||
|
// distribute this software is granted provided this copyright notice appears
|
||||||
|
// in all copies. This software is provided "as is" without express or implied
|
||||||
|
// warranty, and with no claim as to its suitability for any purpose.
|
||||||
|
|
||||||
|
// ideas taken from Rudiger Loos's format class
|
||||||
|
// and Karl Nelson's ofstream (also took its parsing code as basis for printf parsing)
|
||||||
|
|
||||||
|
// ------------------------------------------------------------------------------
|
||||||
|
// parsing.hpp : implementation of the parsing member functions
|
||||||
|
// ( parse, parse_printf_directive)
|
||||||
|
// ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
|
||||||
|
#ifndef BOOST_FORMAT_PARSING_HPP
|
||||||
|
#define BOOST_FORMAT_PARSING_HPP
|
||||||
|
|
||||||
|
|
||||||
|
#include <boost/format.hpp>
|
||||||
|
#include <boost/throw_exception.hpp>
|
||||||
|
#include <boost/assert.hpp>
|
||||||
|
|
||||||
|
|
||||||
|
namespace boost {
|
||||||
|
namespace io {
|
||||||
|
namespace detail {
|
||||||
|
|
||||||
|
template<class Stream> inline
|
||||||
|
bool wrap_isdigit(char c, Stream &os)
|
||||||
|
{
|
||||||
|
#ifndef BOOST_NO_LOCALE_ISIDIGIT
|
||||||
|
return std::isdigit(c, os.rdbuf()->getloc() );
|
||||||
|
# else
|
||||||
|
using namespace std;
|
||||||
|
return isdigit(c);
|
||||||
|
#endif
|
||||||
|
} //end- wrap_isdigit(..)
|
||||||
|
|
||||||
|
template<class Res> inline
|
||||||
|
Res str2int(const std::string& s,
|
||||||
|
std::string::size_type start,
|
||||||
|
BOOST_IO_STD ios &os,
|
||||||
|
const Res = Res(0) )
|
||||||
|
// Input : char string, with starting index
|
||||||
|
// a basic_ios& merely to call its widen/narrow member function in the desired locale.
|
||||||
|
// Effects : reads s[start:] and converts digits into an integral n, of type Res
|
||||||
|
// Returns : n
|
||||||
|
{
|
||||||
|
Res n = 0;
|
||||||
|
while(start<s.size() && wrap_isdigit(s[start], os) ) {
|
||||||
|
char cur_ch = s[start];
|
||||||
|
BOOST_ASSERT(cur_ch != 0 ); // since we called isdigit, this should not happen.
|
||||||
|
n *= 10;
|
||||||
|
n += cur_ch - '0'; // 22.2.1.1.2 of the C++ standard
|
||||||
|
++start;
|
||||||
|
}
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
|
void skip_asterisk(const std::string & buf,
|
||||||
|
std::string::size_type * pos_p,
|
||||||
|
BOOST_IO_STD ios &os)
|
||||||
|
// skip printf's "asterisk-fields" directives in the format-string buf
|
||||||
|
// Input : char string, with starting index *pos_p
|
||||||
|
// a basic_ios& merely to call its widen/narrow member function in the desired locale.
|
||||||
|
// Effects : advance *pos_p by skipping printf's asterisk fields.
|
||||||
|
// Returns : nothing
|
||||||
|
{
|
||||||
|
using namespace std;
|
||||||
|
BOOST_ASSERT( pos_p != 0);
|
||||||
|
if(*pos_p >= buf.size() ) return;
|
||||||
|
if(buf[ *pos_p]=='*') {
|
||||||
|
++ (*pos_p);
|
||||||
|
while (*pos_p < buf.size() && wrap_isdigit(buf[*pos_p],os)) ++(*pos_p);
|
||||||
|
if(buf[*pos_p]=='$') ++(*pos_p);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
inline void maybe_throw_exception( unsigned char exceptions)
|
||||||
|
// auxiliary func called by parse_printf_directive
|
||||||
|
// for centralising error handling
|
||||||
|
// it either throws if user sets the corresponding flag, or does nothing.
|
||||||
|
{
|
||||||
|
if(exceptions & io::bad_format_string_bit)
|
||||||
|
boost::throw_exception(io::bad_format_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
bool parse_printf_directive(const std::string & buf,
|
||||||
|
std::string::size_type * pos_p,
|
||||||
|
detail::format_item * fpar,
|
||||||
|
BOOST_IO_STD ios &os,
|
||||||
|
unsigned char exceptions)
|
||||||
|
// Input : a 'printf-directive' in the format-string, starting at buf[ *pos_p ]
|
||||||
|
// a basic_ios& merely to call its widen/narrow member function in the desired locale.
|
||||||
|
// a bitset'excpetions' telling whether to throw exceptions on errors.
|
||||||
|
// Returns : true if parse somehow succeeded (possibly ignoring errors if exceptions disabled)
|
||||||
|
// false if it failed so bad that the directive should be printed verbatim
|
||||||
|
// Effects : - *pos_p is incremented so that buf[*pos_p] is the first char after the directive
|
||||||
|
// - *fpar is set with the parameters read in the directive
|
||||||
|
{
|
||||||
|
typedef format_item format_item_t;
|
||||||
|
BOOST_ASSERT( pos_p != 0);
|
||||||
|
std::string::size_type &i1 = *pos_p,
|
||||||
|
i0;
|
||||||
|
fpar->argN_ = format_item_t::argN_no_posit; // if no positional-directive
|
||||||
|
|
||||||
|
bool in_brackets=false;
|
||||||
|
if(buf[i1]=='|')
|
||||||
|
{
|
||||||
|
in_brackets=true;
|
||||||
|
if( ++i1 >= buf.size() ) {
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// the flag '0' would be picked as a digit for argument order, but here it's a flag :
|
||||||
|
if(buf[i1]=='0')
|
||||||
|
goto parse_flags;
|
||||||
|
|
||||||
|
// handle argument order (%2$d) or possibly width specification: %2d
|
||||||
|
i0 = i1; // save position before digits
|
||||||
|
while (i1 < buf.size() && wrap_isdigit(buf[i1], os))
|
||||||
|
++i1;
|
||||||
|
if (i1!=i0)
|
||||||
|
{
|
||||||
|
if( i1 >= buf.size() ) {
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
int n=str2int(buf,i0, os, int(0) );
|
||||||
|
|
||||||
|
// %N% case : this is already the end of the directive
|
||||||
|
if( buf[i1] == '%' )
|
||||||
|
{
|
||||||
|
fpar->argN_ = n-1;
|
||||||
|
++i1;
|
||||||
|
if( in_brackets)
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
// but don't return. maybe "%" was used in lieu of '$', so we go on.
|
||||||
|
else return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if ( buf[i1]=='$' )
|
||||||
|
{
|
||||||
|
fpar->argN_ = n-1;
|
||||||
|
++i1;
|
||||||
|
}
|
||||||
|
else
|
||||||
|
{
|
||||||
|
// non-positionnal directive
|
||||||
|
fpar->ref_state_.width_ = n;
|
||||||
|
fpar->argN_ = format_item_t::argN_no_posit;
|
||||||
|
goto parse_precision;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parse_flags:
|
||||||
|
// handle flags
|
||||||
|
while ( i1 <buf.size()) // as long as char is one of + - = # 0 l h or ' '
|
||||||
|
{
|
||||||
|
// misc switches
|
||||||
|
switch (buf[i1])
|
||||||
|
{
|
||||||
|
case '\'' : break; // no effect yet. (painful to implement)
|
||||||
|
case 'l':
|
||||||
|
case 'h': // short/long modifier : for printf-comaptibility (no action needed)
|
||||||
|
break;
|
||||||
|
case '-':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::left;
|
||||||
|
break;
|
||||||
|
case '=':
|
||||||
|
fpar->pad_scheme_ |= format_item_t::centered;
|
||||||
|
break;
|
||||||
|
case ' ':
|
||||||
|
fpar->pad_scheme_ |= format_item_t::spacepad;
|
||||||
|
break;
|
||||||
|
case '+':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::showpos;
|
||||||
|
break;
|
||||||
|
case '0':
|
||||||
|
fpar->pad_scheme_ |= format_item_t::zeropad;
|
||||||
|
// need to know alignment before really setting flags,
|
||||||
|
// so just add 'zeropad' flag for now, it will be processed later.
|
||||||
|
break;
|
||||||
|
case '#':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::showpoint | std::ios::showbase;
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
goto parse_width;
|
||||||
|
}
|
||||||
|
++i1;
|
||||||
|
} // loop on flag.
|
||||||
|
if( i1>=buf.size()) {
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
parse_width:
|
||||||
|
// handle width spec
|
||||||
|
skip_asterisk(buf, &i1, os); // skips 'asterisk fields' : *, or *N$
|
||||||
|
i0 = i1; // save position before digits
|
||||||
|
while (i1<buf.size() && wrap_isdigit(buf[i1], os))
|
||||||
|
i1++;
|
||||||
|
|
||||||
|
if (i1!=i0)
|
||||||
|
{ fpar->ref_state_.width_ = str2int( buf,i0, os, std::streamsize(0) ); }
|
||||||
|
|
||||||
|
parse_precision:
|
||||||
|
if( i1>=buf.size()) {
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
// handle precision spec
|
||||||
|
if (buf[i1]=='.')
|
||||||
|
{
|
||||||
|
++i1;
|
||||||
|
skip_asterisk(buf, &i1, os);
|
||||||
|
i0 = i1; // save position before digits
|
||||||
|
while (i1<buf.size() && wrap_isdigit(buf[i1], os))
|
||||||
|
++i1;
|
||||||
|
|
||||||
|
if(i1==i0)
|
||||||
|
fpar->ref_state_.precision_ = 0;
|
||||||
|
else
|
||||||
|
fpar->ref_state_.precision_ = str2int(buf,i0, os, std::streamsize(0) );
|
||||||
|
}
|
||||||
|
|
||||||
|
// handle formatting-type flags :
|
||||||
|
while( i1<buf.size() &&
|
||||||
|
( buf[i1]=='l' || buf[i1]=='L' || buf[i1]=='h') )
|
||||||
|
++i1;
|
||||||
|
if( i1>=buf.size()) {
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if( in_brackets && buf[i1]=='|' )
|
||||||
|
{
|
||||||
|
++i1;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
switch (buf[i1])
|
||||||
|
{
|
||||||
|
case 'X':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::uppercase;
|
||||||
|
case 'p': // pointer => set hex.
|
||||||
|
case 'x':
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::basefield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::hex;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'o':
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::basefield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::oct;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'E':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::uppercase;
|
||||||
|
case 'e':
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::floatfield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::scientific;
|
||||||
|
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::basefield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::dec;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'f':
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::floatfield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::fixed;
|
||||||
|
case 'u':
|
||||||
|
case 'd':
|
||||||
|
case 'i':
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::basefield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::dec;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'T':
|
||||||
|
++i1;
|
||||||
|
if( i1 >= buf.size())
|
||||||
|
maybe_throw_exception(exceptions);
|
||||||
|
else
|
||||||
|
fpar->ref_state_.fill_ = buf[i1];
|
||||||
|
fpar->pad_scheme_ |= format_item_t::tabulation;
|
||||||
|
fpar->argN_ = format_item_t::argN_tabulation;
|
||||||
|
break;
|
||||||
|
case 't':
|
||||||
|
fpar->ref_state_.fill_ = ' ';
|
||||||
|
fpar->pad_scheme_ |= format_item_t::tabulation;
|
||||||
|
fpar->argN_ = format_item_t::argN_tabulation;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'G':
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::uppercase;
|
||||||
|
break;
|
||||||
|
case 'g': // 'g' conversion is default for floats.
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::basefield;
|
||||||
|
fpar->ref_state_.flags_ |= std::ios::dec;
|
||||||
|
|
||||||
|
// CLEAR all floatield flags, so stream will CHOOSE
|
||||||
|
fpar->ref_state_.flags_ &= ~std::ios::floatfield;
|
||||||
|
break;
|
||||||
|
|
||||||
|
case 'C':
|
||||||
|
case 'c':
|
||||||
|
fpar->truncate_ = 1;
|
||||||
|
break;
|
||||||
|
case 'S':
|
||||||
|
case 's':
|
||||||
|
fpar->truncate_ = fpar->ref_state_.precision_;
|
||||||
|
fpar->ref_state_.precision_ = -1;
|
            break;
        case 'n' :
            fpar->argN_ = format_item_t::argN_ignored;
            break;
        default:
            maybe_throw_exception(exceptions);
        }
        ++i1;

        if( in_brackets )
        {
            if( i1<buf.size() && buf[i1]=='|' )
            {
                ++i1;
                return true;
            }
            else maybe_throw_exception(exceptions);
        }
        return true;
    }

} // detail namespace
} // io namespace


// -----------------------------------------------
//  format :: parse(..)

void basic_format::parse(const string_t & buf)
    // parse the format-string
{
    using namespace std;
    const char arg_mark = '%';
    bool ordered_args=true;
    int max_argN=-1;
    string_t::size_type i1=0;
    int num_items=0;

    // A: find upper_bound on num_items and allocate arrays
    i1=0;
    while( (i1=buf.find(arg_mark,i1)) != string::npos )
    {
        if( i1+1 >= buf.size() ) {
            if(exceptions() & io::bad_format_string_bit)
                boost::throw_exception(io::bad_format_string()); // must not end in "bla bla %"
            else break; // stop there, ignore last '%'
        }
        if(buf[i1+1] == buf[i1] ) { i1+=2; continue; } // escaped "%%" / "##"
        ++i1;

        // in case of %N% directives, don't count it twice (wastes allocations..) :
        while(i1 < buf.size() && io::detail::wrap_isdigit(buf[i1],oss_)) ++i1;
        if( i1 < buf.size() && buf[i1] == arg_mark ) ++ i1;

        ++num_items;
    }
    items_.assign( num_items, format_item_t() );

    // B: Now the real parsing of the format string :
    num_items=0;
    i1 = 0;
    string_t::size_type i0 = i1;
    bool special_things=false;
    int cur_it=0;
    while( (i1=buf.find(arg_mark,i1)) != string::npos )
    {
        string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;

        if( buf[i1+1] == buf[i1] ) // escaped mark, '%%'
        {
            piece += buf.substr(i0, i1-i0) + buf[i1];
            i1+=2; i0=i1;
            continue;
        }
        BOOST_ASSERT( static_cast<unsigned int>(cur_it) < items_.size() || cur_it==0);

        if(i1!=i0) piece += buf.substr(i0, i1-i0);
        ++i1;

        bool parse_ok;
        parse_ok = io::detail::parse_printf_directive(buf, &i1, &items_[cur_it], oss_, exceptions());
        if( ! parse_ok ) continue; // the directive will be printed verbatim

        i0=i1;
        items_[cur_it].compute_states(); // process complex options, like zeropad, into stream params.

        int argN=items_[cur_it].argN_;
        if(argN == format_item_t::argN_ignored)
            continue;
        if(argN == format_item_t::argN_no_posit)
            ordered_args=false;
        else if(argN == format_item_t::argN_tabulation) special_things=true;
        else if(argN > max_argN) max_argN = argN;
        ++num_items;
        ++cur_it;
    } // loop on %'s
    BOOST_ASSERT(cur_it == num_items);

    // store the final piece of string
    string_t & piece = (cur_it==0) ? prefix_ : items_[cur_it-1].appendix_;
    piece += buf.substr(i0);

    if( !ordered_args)
    {
        if(max_argN >= 0 ) // don't mix positional with non-positional directives
        {
            if(exceptions() & io::bad_format_string_bit)
                boost::throw_exception(io::bad_format_string());
            // else do nothing => positional arguments are processed as non-positional
        }
        // set things up as they would have been with positional directives :
        int non_ordered_items = 0;
        for(int i=0; i< num_items; ++i)
            if(items_[i].argN_ == format_item_t::argN_no_posit)
            {
                items_[i].argN_ = non_ordered_items;
                ++non_ordered_items;
            }
        max_argN = non_ordered_items-1;
    }

    // C: set some member data :
    items_.resize(num_items);

    if(special_things) style_ |= special_needs;
    num_args_ = max_argN + 1;
    if(ordered_args) style_ |= ordered;
    else style_ &= ~ordered;
}

} // namespace boost


#endif // BOOST_FORMAT_PARSING_HPP

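The two passes above are what let directives such as "%2% of %1%" reorder their arguments and "%%" pass through literally. A minimal, self-contained usage sketch, assuming the bundled Boost.Format header is reachable as <boost/format.hpp>; it is not part of the imported sources:

// Illustrative use of the parser implemented above.
#include <boost/format.hpp>
#include <iostream>

int main()
{
    // Positional directives: parse() records argN_ = 1 and 0 for the two items.
    std::cout << boost::format("%2% of %1%\n") % "three" % 2;   // prints "2 of three"

    // printf-style directives are non-positional (argN_no_posit), later renumbered 0,1,...
    std::cout << boost::format("%s-%s\n") % "a" % "b";          // prints "a-b"

    // "%%" is an escaped mark and does not create a format item.
    std::cout << boost::format("100%% of %1%\n") % 7;           // prints "100% of 7"
    return 0;
}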
@ -0,0 +1,47 @@
#ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED
#define BOOST_THROW_EXCEPTION_HPP_INCLUDED

// MS compatible compilers support #pragma once

#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif

//
//  boost/throw_exception.hpp
//
//  Copyright (c) 2002 Peter Dimov and Multi Media Ltd.
//
//  Permission to copy, use, modify, sell and distribute this software
//  is granted provided this copyright notice appears in all copies.
//  This software is provided "as is" without express or implied
//  warranty, and with no claim as to its suitability for any purpose.
//
//  http://www.boost.org/libs/utility/throw_exception.html
//

//#include <boost/config.hpp>

#ifdef BOOST_NO_EXCEPTIONS
# include <exception>
#endif

namespace boost
{

#ifdef BOOST_NO_EXCEPTIONS

void throw_exception(std::exception const & e); // user defined

#else

template<class E> void throw_exception(E const & e)
{
    throw e;
}

#endif

} // namespace boost

#endif // #ifndef BOOST_THROW_EXCEPTION_HPP_INCLUDED

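With BOOST_NO_EXCEPTIONS defined, the out-of-line overload declared above has to be supplied by the embedding program. A hedged sketch of such a user-defined handler; the log-and-abort body is illustrative, not something this header mandates:

// Caller-provided handler for BOOST_NO_EXCEPTIONS builds (sketch only).
#ifdef BOOST_NO_EXCEPTIONS
#include <cstdio>
#include <cstdlib>
#include <exception>

namespace boost
{
    void throw_exception(std::exception const & e)
    {
        std::fprintf(stderr, "fatal: %s\n", e.what());
        std::abort();   // cannot propagate: exceptions are compiled out
    }
}
#endif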
File diff suppressed because it is too large
@ -0,0 +1,278 @@
#include "derivations.hh"
#include "store-api.hh"
#include "globals.hh"
#include "util.hh"
#include "misc.hh"


namespace nix {


void DerivationOutput::parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const
{
    recursive = false;
    string algo = hashAlgo;

    if (string(algo, 0, 2) == "r:") {
        recursive = true;
        algo = string(algo, 2);
    }

    hashType = parseHashType(algo);
    if (hashType == htUnknown)
        throw Error(format("unknown hash algorithm `%1%'") % algo);

    hash = parseHash(hashType, this->hash);
}


Path writeDerivation(StoreAPI & store,
    const Derivation & drv, const string & name, bool repair)
{
    PathSet references;
    references.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs)
        references.insert(i->first);
    /* Note that the outputs of a derivation are *not* references
       (that can be missing (of course) and should not necessarily be
       held during a garbage collection). */
    string suffix = name + drvExtension;
    string contents = unparseDerivation(drv);
    return settings.readOnlyMode
        ? computeStorePathForText(suffix, contents, references)
        : store.addTextToStore(suffix, contents, references, repair);
}


static Path parsePath(std::istream & str)
{
    string s = parseString(str);
    if (s.size() == 0 || s[0] != '/')
        throw Error(format("bad path `%1%' in derivation") % s);
    return s;
}


static StringSet parseStrings(std::istream & str, bool arePaths)
{
    StringSet res;
    while (!endOfList(str))
        res.insert(arePaths ? parsePath(str) : parseString(str));
    return res;
}


Derivation parseDerivation(const string & s)
{
    Derivation drv;
    std::istringstream str(s);
    expect(str, "Derive([");

    /* Parse the list of outputs. */
    while (!endOfList(str)) {
        DerivationOutput out;
        expect(str, "("); string id = parseString(str);
        expect(str, ","); out.path = parsePath(str);
        expect(str, ","); out.hashAlgo = parseString(str);
        expect(str, ","); out.hash = parseString(str);
        expect(str, ")");
        drv.outputs[id] = out;
    }

    /* Parse the list of input derivations. */
    expect(str, ",[");
    while (!endOfList(str)) {
        expect(str, "(");
        Path drvPath = parsePath(str);
        expect(str, ",[");
        drv.inputDrvs[drvPath] = parseStrings(str, false);
        expect(str, ")");
    }

    expect(str, ",["); drv.inputSrcs = parseStrings(str, true);
    expect(str, ","); drv.platform = parseString(str);
    expect(str, ","); drv.builder = parseString(str);

    /* Parse the builder arguments. */
    expect(str, ",[");
    while (!endOfList(str))
        drv.args.push_back(parseString(str));

    /* Parse the environment variables. */
    expect(str, ",[");
    while (!endOfList(str)) {
        expect(str, "("); string name = parseString(str);
        expect(str, ","); string value = parseString(str);
        expect(str, ")");
        drv.env[name] = value;
    }

    expect(str, ")");
    return drv;
}


static void printString(string & res, const string & s)
{
    res += '"';
    for (const char * i = s.c_str(); *i; i++)
        if (*i == '\"' || *i == '\\') { res += "\\"; res += *i; }
        else if (*i == '\n') res += "\\n";
        else if (*i == '\r') res += "\\r";
        else if (*i == '\t') res += "\\t";
        else res += *i;
    res += '"';
}


template<class ForwardIterator>
static void printStrings(string & res, ForwardIterator i, ForwardIterator j)
{
    res += '[';
    bool first = true;
    for ( ; i != j; ++i) {
        if (first) first = false; else res += ',';
        printString(res, *i);
    }
    res += ']';
}


string unparseDerivation(const Derivation & drv)
{
    string s;
    s.reserve(65536);
    s += "Derive([";

    bool first = true;
    foreach (DerivationOutputs::const_iterator, i, drv.outputs) {
        if (first) first = false; else s += ',';
        s += '('; printString(s, i->first);
        s += ','; printString(s, i->second.path);
        s += ','; printString(s, i->second.hashAlgo);
        s += ','; printString(s, i->second.hash);
        s += ')';
    }

    s += "],[";
    first = true;
    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) {
        if (first) first = false; else s += ',';
        s += '('; printString(s, i->first);
        s += ','; printStrings(s, i->second.begin(), i->second.end());
        s += ')';
    }

    s += "],";
    printStrings(s, drv.inputSrcs.begin(), drv.inputSrcs.end());

    s += ','; printString(s, drv.platform);
    s += ','; printString(s, drv.builder);
    s += ','; printStrings(s, drv.args.begin(), drv.args.end());

    s += ",[";
    first = true;
    foreach (StringPairs::const_iterator, i, drv.env) {
        if (first) first = false; else s += ',';
        s += '('; printString(s, i->first);
        s += ','; printString(s, i->second);
        s += ')';
    }

    s += "])";

    return s;
}


bool isDerivation(const string & fileName)
{
    return hasSuffix(fileName, drvExtension);
}


bool isFixedOutputDrv(const Derivation & drv)
{
    return drv.outputs.size() == 1 &&
        drv.outputs.begin()->first == "out" &&
        drv.outputs.begin()->second.hash != "";
}


DrvHashes drvHashes;


/* Returns the hash of a derivation modulo fixed-output
   subderivations.  A fixed-output derivation is a derivation with
   one output (`out') for which an expected hash and hash algorithm
   are specified (using the `outputHash' and `outputHashAlgo'
   attributes).  We don't want changes to such derivations to
   propagate upwards through the dependency graph, changing output
   paths everywhere.

   For instance, if we change the url in a call to the `fetchurl'
   function, we do not want to rebuild everything depending on it
   (after all, (the hash of) the file being downloaded is unchanged).
   So the *output paths* should not change.  On the other hand, the
   *derivation paths* should change to reflect the new dependency
   graph.

   That's what this function does: it returns a hash which is just
   the hash of the derivation ATerm, except that any input derivation
   paths have been replaced by the result of a recursive call to this
   function, and that for fixed-output derivations we return a hash
   of its output path. */
Hash hashDerivationModulo(StoreAPI & store, Derivation drv)
{
    /* Return a fixed hash for fixed-output derivations. */
    if (isFixedOutputDrv(drv)) {
        DerivationOutputs::const_iterator i = drv.outputs.begin();
        return hashString(htSHA256, "fixed:out:"
            + i->second.hashAlgo + ":"
            + i->second.hash + ":"
            + i->second.path);
    }

    /* For other derivations, replace the input paths with recursive
       calls to this function. */
    DerivationInputs inputs2;
    foreach (DerivationInputs::const_iterator, i, drv.inputDrvs) {
        Hash h = drvHashes[i->first];
        if (h.type == htUnknown) {
            assert(store.isValidPath(i->first));
            Derivation drv2 = parseDerivation(readFile(i->first));
            h = hashDerivationModulo(store, drv2);
            drvHashes[i->first] = h;
        }
        inputs2[printHash(h)] = i->second;
    }
    drv.inputDrvs = inputs2;

    return hashString(htSHA256, unparseDerivation(drv));
}


DrvPathWithOutputs parseDrvPathWithOutputs(const string & s)
{
    size_t n = s.find("!");
    return n == s.npos
        ? DrvPathWithOutputs(s, std::set<string>())
        : DrvPathWithOutputs(string(s, 0, n), tokenizeString<std::set<string> >(string(s, n + 1), ","));
}


Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs)
{
    return outputs.empty()
        ? drvPath
        : drvPath + "!" + concatStringsSep(",", outputs);
}


bool wantOutput(const string & output, const std::set<string> & wanted)
{
    return wanted.empty() || wanted.find(output) != wanted.end();
}


}

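unparseDerivation() emits the textual `Derive(...)' form stored in .drv files, and parseDerivation() reads it back. A minimal round-trip sketch follows; the store path, platform and builder are invented for the example, and exampleRoundTrip() is not part of the imported sources:

// Round-trip through the derivation parser and printer above (sketch).
#include "derivations.hh"
#include <cassert>
#include <iostream>

void exampleRoundTrip()
{
    using namespace nix;
    std::string text =
        "Derive([(\"out\",\"/nix/store/00000000000000000000000000000000-example\",\"\",\"\")],"
        "[],[],\"x86_64-linux\",\"/bin/sh\",[\"-c\",\"echo hi > $out\"],"
        "[(\"out\",\"/nix/store/00000000000000000000000000000000-example\")])";
    Derivation drv = parseDerivation(text);
    std::cout << drv.platform << std::endl;   // "x86_64-linux"
    assert(unparseDerivation(drv) == text);   // printer and parser are inverses here
}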
@ -0,0 +1,93 @@
#pragma once

#include "types.hh"
#include "hash.hh"

#include <map>


namespace nix {


/* Extension of derivations in the Nix store. */
const string drvExtension = ".drv";


/* Abstract syntax of derivations. */

struct DerivationOutput
{
    Path path;
    string hashAlgo; /* hash used for expected hash computation */
    string hash; /* expected hash, may be null */
    DerivationOutput()
    {
    }
    DerivationOutput(Path path, string hashAlgo, string hash)
    {
        this->path = path;
        this->hashAlgo = hashAlgo;
        this->hash = hash;
    }
    void parseHashInfo(bool & recursive, HashType & hashType, Hash & hash) const;
};

typedef std::map<string, DerivationOutput> DerivationOutputs;

/* For inputs that are sub-derivations, we specify exactly which
   output IDs we are interested in. */
typedef std::map<Path, StringSet> DerivationInputs;

typedef std::map<string, string> StringPairs;

struct Derivation
{
    DerivationOutputs outputs; /* keyed on symbolic IDs */
    DerivationInputs inputDrvs; /* inputs that are sub-derivations */
    PathSet inputSrcs; /* inputs that are sources */
    string platform;
    Path builder;
    Strings args;
    StringPairs env;
};


class StoreAPI;


/* Write a derivation to the Nix store, and return its path. */
Path writeDerivation(StoreAPI & store,
    const Derivation & drv, const string & name, bool repair = false);

/* Parse a derivation. */
Derivation parseDerivation(const string & s);

/* Print a derivation. */
string unparseDerivation(const Derivation & drv);

/* Check whether a file name ends with the extension for
   derivations. */
bool isDerivation(const string & fileName);

/* Return true iff this is a fixed-output derivation. */
bool isFixedOutputDrv(const Derivation & drv);

Hash hashDerivationModulo(StoreAPI & store, Derivation drv);

/* Memoisation of hashDerivationModulo(). */
typedef std::map<Path, Hash> DrvHashes;

extern DrvHashes drvHashes;

/* Split a string specifying a derivation and a set of outputs
   (/nix/store/hash-foo!out1,out2,...) into the derivation path and
   the outputs. */
typedef std::pair<string, std::set<string> > DrvPathWithOutputs;
DrvPathWithOutputs parseDrvPathWithOutputs(const string & s);

Path makeDrvPathWithOutputs(const Path & drvPath, const std::set<string> & outputs);

bool wantOutput(const string & output, const std::set<string> & wanted);


}

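The `drvPath!out1,out2' convention documented above can be exercised directly. A small sketch with an invented .drv path; note that std::set ordering makes "lib" sort before "out" when the string is rebuilt:

// Splitting and rebuilding a "drvPath!outputs" specifier (sketch only).
#include "derivations.hh"
#include <cassert>

void exampleOutputsSpec()
{
    using namespace nix;
    Path spec = "/nix/store/00000000000000000000000000000000-foo.drv!out,lib";
    DrvPathWithOutputs d = parseDrvPathWithOutputs(spec);
    assert(wantOutput("out", d.second));                    // "out" was requested
    assert(wantOutput("doc", std::set<std::string>()));     // empty set means "any output"
    assert(makeDrvPathWithOutputs(d.first, d.second)        // set iteration is sorted,
           == d.first + "!lib,out");                        // so "lib" now comes first
}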
@ -0,0 +1,748 @@
|
||||||
|
#include "globals.hh"
|
||||||
|
#include "misc.hh"
|
||||||
|
#include "local-store.hh"
|
||||||
|
|
||||||
|
#include <functional>
|
||||||
|
#include <queue>
|
||||||
|
#include <algorithm>
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
static string gcLockName = "gc.lock";
|
||||||
|
static string tempRootsDir = "temproots";
|
||||||
|
static string gcRootsDir = "gcroots";
|
||||||
|
|
||||||
|
|
||||||
|
/* Acquire the global GC lock. This is used to prevent new Nix
|
||||||
|
processes from starting after the temporary root files have been
|
||||||
|
read. To be precise: when they try to create a new temporary root
|
||||||
|
file, they will block until the garbage collector has finished /
|
||||||
|
yielded the GC lock. */
|
||||||
|
int LocalStore::openGCLock(LockType lockType)
|
||||||
|
{
|
||||||
|
Path fnGCLock = (format("%1%/%2%")
|
||||||
|
% settings.nixStateDir % gcLockName).str();
|
||||||
|
|
||||||
|
debug(format("acquiring global GC lock `%1%'") % fnGCLock);
|
||||||
|
|
||||||
|
AutoCloseFD fdGCLock = open(fnGCLock.c_str(), O_RDWR | O_CREAT, 0600);
|
||||||
|
if (fdGCLock == -1)
|
||||||
|
throw SysError(format("opening global GC lock `%1%'") % fnGCLock);
|
||||||
|
closeOnExec(fdGCLock);
|
||||||
|
|
||||||
|
if (!lockFile(fdGCLock, lockType, false)) {
|
||||||
|
printMsg(lvlError, format("waiting for the big garbage collector lock..."));
|
||||||
|
lockFile(fdGCLock, lockType, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* !!! Restrict read permission on the GC root. Otherwise any
|
||||||
|
process that can open the file for reading can DoS the
|
||||||
|
collector. */
|
||||||
|
|
||||||
|
return fdGCLock.borrow();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void makeSymlink(const Path & link, const Path & target)
|
||||||
|
{
|
||||||
|
/* Create directories up to `gcRoot'. */
|
||||||
|
createDirs(dirOf(link));
|
||||||
|
|
||||||
|
/* Create the new symlink. */
|
||||||
|
Path tempLink = (format("%1%.tmp-%2%-%3%")
|
||||||
|
% link % getpid() % rand()).str();
|
||||||
|
createSymlink(target, tempLink);
|
||||||
|
|
||||||
|
/* Atomically replace the old one. */
|
||||||
|
if (rename(tempLink.c_str(), link.c_str()) == -1)
|
||||||
|
throw SysError(format("cannot rename `%1%' to `%2%'")
|
||||||
|
% tempLink % link);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::syncWithGC()
|
||||||
|
{
|
||||||
|
AutoCloseFD fdGCLock = openGCLock(ltRead);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::addIndirectRoot(const Path & path)
|
||||||
|
{
|
||||||
|
string hash = printHash32(hashString(htSHA1, path));
|
||||||
|
Path realRoot = canonPath((format("%1%/%2%/auto/%3%")
|
||||||
|
% settings.nixStateDir % gcRootsDir % hash).str());
|
||||||
|
makeSymlink(realRoot, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path addPermRoot(StoreAPI & store, const Path & _storePath,
|
||||||
|
const Path & _gcRoot, bool indirect, bool allowOutsideRootsDir)
|
||||||
|
{
|
||||||
|
Path storePath(canonPath(_storePath));
|
||||||
|
Path gcRoot(canonPath(_gcRoot));
|
||||||
|
assertStorePath(storePath);
|
||||||
|
|
||||||
|
if (isInStore(gcRoot))
|
||||||
|
throw Error(format(
|
||||||
|
"creating a garbage collector root (%1%) in the Nix store is forbidden "
|
||||||
|
"(are you running nix-build inside the store?)") % gcRoot);
|
||||||
|
|
||||||
|
if (indirect) {
|
||||||
|
/* Don't clobber the link if it already exists and doesn't
|
||||||
|
point to the Nix store. */
|
||||||
|
if (pathExists(gcRoot) && (!isLink(gcRoot) || !isInStore(readLink(gcRoot))))
|
||||||
|
throw Error(format("cannot create symlink `%1%'; already exists") % gcRoot);
|
||||||
|
makeSymlink(gcRoot, storePath);
|
||||||
|
store.addIndirectRoot(gcRoot);
|
||||||
|
}
|
||||||
|
|
||||||
|
else {
|
||||||
|
if (!allowOutsideRootsDir) {
|
||||||
|
Path rootsDir = canonPath((format("%1%/%2%") % settings.nixStateDir % gcRootsDir).str());
|
||||||
|
|
||||||
|
if (string(gcRoot, 0, rootsDir.size() + 1) != rootsDir + "/")
|
||||||
|
throw Error(format(
|
||||||
|
"path `%1%' is not a valid garbage collector root; "
|
||||||
|
"it's not in the directory `%2%'")
|
||||||
|
% gcRoot % rootsDir);
|
||||||
|
}
|
||||||
|
|
||||||
|
makeSymlink(gcRoot, storePath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Check that the root can be found by the garbage collector.
|
||||||
|
!!! This can be very slow on machines that have many roots.
|
||||||
|
Instead of reading all the roots, it would be more efficient to
|
||||||
|
check if the root is in a directory in or linked from the
|
||||||
|
gcroots directory. */
|
||||||
|
if (settings.checkRootReachability) {
|
||||||
|
Roots roots = store.findRoots();
|
||||||
|
if (roots.find(gcRoot) == roots.end())
|
||||||
|
printMsg(lvlError,
|
||||||
|
format(
|
||||||
|
"warning: `%1%' is not in a directory where the garbage collector looks for roots; "
|
||||||
|
"therefore, `%2%' might be removed by the garbage collector")
|
||||||
|
% gcRoot % storePath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Grab the global GC root, causing us to block while a GC is in
|
||||||
|
progress. This prevents the set of permanent roots from
|
||||||
|
increasing while a GC is in progress. */
|
||||||
|
store.syncWithGC();
|
||||||
|
|
||||||
|
return gcRoot;
|
||||||
|
}
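/* Illustrative note, not part of the imported sources: an indirect root
   created through addPermRoot() amounts to two symlinks, roughly

       /path/to/result                            -> /nix/store/<hash>-output
       <nixStateDir>/gcroots/auto/<sha1 of link>  -> /path/to/result

   so the user may delete or move their own link freely; findRoots() below
   unlinks the dangling auto/ entry the next time the collector runs. */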
|
||||||
|
|
||||||
|
|
||||||
|
/* The file to which we write our temporary roots. */
|
||||||
|
static Path fnTempRoots;
|
||||||
|
static AutoCloseFD fdTempRoots;
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::addTempRoot(const Path & path)
|
||||||
|
{
|
||||||
|
/* Create the temporary roots file for this process. */
|
||||||
|
if (fdTempRoots == -1) {
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
Path dir = (format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str();
|
||||||
|
createDirs(dir);
|
||||||
|
|
||||||
|
fnTempRoots = (format("%1%/%2%")
|
||||||
|
% dir % getpid()).str();
|
||||||
|
|
||||||
|
AutoCloseFD fdGCLock = openGCLock(ltRead);
|
||||||
|
|
||||||
|
if (pathExists(fnTempRoots))
|
||||||
|
/* It *must* be stale, since there can be no two
|
||||||
|
processes with the same pid. */
|
||||||
|
unlink(fnTempRoots.c_str());
|
||||||
|
|
||||||
|
fdTempRoots = openLockFile(fnTempRoots, true);
|
||||||
|
|
||||||
|
fdGCLock.close();
|
||||||
|
|
||||||
|
debug(format("acquiring read lock on `%1%'") % fnTempRoots);
|
||||||
|
lockFile(fdTempRoots, ltRead, true);
|
||||||
|
|
||||||
|
/* Check whether the garbage collector didn't get in our
|
||||||
|
way. */
|
||||||
|
struct stat st;
|
||||||
|
if (fstat(fdTempRoots, &st) == -1)
|
||||||
|
throw SysError(format("statting `%1%'") % fnTempRoots);
|
||||||
|
if (st.st_size == 0) break;
|
||||||
|
|
||||||
|
/* The garbage collector deleted this file before we could
|
||||||
|
get a lock. (It won't delete the file after we get a
|
||||||
|
lock.) Try again. */
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Upgrade the lock to a write lock. This will cause us to block
|
||||||
|
if the garbage collector is holding our lock. */
|
||||||
|
debug(format("acquiring write lock on `%1%'") % fnTempRoots);
|
||||||
|
lockFile(fdTempRoots, ltWrite, true);
|
||||||
|
|
||||||
|
string s = path + '\0';
|
||||||
|
writeFull(fdTempRoots, (const unsigned char *) s.data(), s.size());
|
||||||
|
|
||||||
|
/* Downgrade to a read lock. */
|
||||||
|
debug(format("downgrading to read lock on `%1%'") % fnTempRoots);
|
||||||
|
lockFile(fdTempRoots, ltRead, true);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void removeTempRoots()
|
||||||
|
{
|
||||||
|
if (fdTempRoots != -1) {
|
||||||
|
fdTempRoots.close();
|
||||||
|
unlink(fnTempRoots.c_str());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Automatically clean up the temporary roots file when we exit. */
|
||||||
|
struct RemoveTempRoots
|
||||||
|
{
|
||||||
|
~RemoveTempRoots()
|
||||||
|
{
|
||||||
|
removeTempRoots();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
static RemoveTempRoots autoRemoveTempRoots __attribute__((unused));
|
||||||
|
|
||||||
|
|
||||||
|
typedef std::shared_ptr<AutoCloseFD> FDPtr;
|
||||||
|
typedef list<FDPtr> FDs;
|
||||||
|
|
||||||
|
|
||||||
|
static void readTempRoots(PathSet & tempRoots, FDs & fds)
|
||||||
|
{
|
||||||
|
/* Read the `temproots' directory for per-process temporary root
|
||||||
|
files. */
|
||||||
|
Strings tempRootFiles = readDirectory(
|
||||||
|
(format("%1%/%2%") % settings.nixStateDir % tempRootsDir).str());
|
||||||
|
|
||||||
|
foreach (Strings::iterator, i, tempRootFiles) {
|
||||||
|
Path path = (format("%1%/%2%/%3%") % settings.nixStateDir % tempRootsDir % *i).str();
|
||||||
|
|
||||||
|
debug(format("reading temporary root file `%1%'") % path);
|
||||||
|
FDPtr fd(new AutoCloseFD(open(path.c_str(), O_RDWR, 0666)));
|
||||||
|
if (*fd == -1) {
|
||||||
|
/* It's okay if the file has disappeared. */
|
||||||
|
if (errno == ENOENT) continue;
|
||||||
|
throw SysError(format("opening temporary roots file `%1%'") % path);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This should work, but doesn't, for some reason. */
|
||||||
|
//FDPtr fd(new AutoCloseFD(openLockFile(path, false)));
|
||||||
|
//if (*fd == -1) continue;
|
||||||
|
|
||||||
|
/* Try to acquire a write lock without blocking. This can
|
||||||
|
only succeed if the owning process has died. In that case
|
||||||
|
we don't care about its temporary roots. */
|
||||||
|
if (lockFile(*fd, ltWrite, false)) {
|
||||||
|
printMsg(lvlError, format("removing stale temporary roots file `%1%'") % path);
|
||||||
|
unlink(path.c_str());
|
||||||
|
writeFull(*fd, (const unsigned char *) "d", 1);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Acquire a read lock. This will prevent the owning process
|
||||||
|
from upgrading to a write lock, therefore it will block in
|
||||||
|
addTempRoot(). */
|
||||||
|
debug(format("waiting for read lock on `%1%'") % path);
|
||||||
|
lockFile(*fd, ltRead, true);
|
||||||
|
|
||||||
|
/* Read the entire file. */
|
||||||
|
string contents = readFile(*fd);
|
||||||
|
|
||||||
|
/* Extract the roots. */
|
||||||
|
string::size_type pos = 0, end;
|
||||||
|
|
||||||
|
while ((end = contents.find((char) 0, pos)) != string::npos) {
|
||||||
|
Path root(contents, pos, end - pos);
|
||||||
|
debug(format("got temporary root `%1%'") % root);
|
||||||
|
assertStorePath(root);
|
||||||
|
tempRoots.insert(root);
|
||||||
|
pos = end + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
fds.push_back(fd); /* keep open */
|
||||||
|
}
|
||||||
|
}
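/* Sketch, not part of the imported code: the per-process file written by
   addTempRoot() is simply NUL-terminated store paths concatenated together,
   which is what the extraction loop above undoes.  A standalone reader of
   that format would look like this. */
static PathSet exampleSplitTempRoots(const string & contents)
{
    PathSet roots;
    string::size_type pos = 0, end;
    while ((end = contents.find((char) 0, pos)) != string::npos) {
        roots.insert(string(contents, pos, end - pos));   /* one store path */
        pos = end + 1;                                     /* skip the NUL */
    }
    return roots;
}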
|
||||||
|
|
||||||
|
|
||||||
|
static void foundRoot(StoreAPI & store,
|
||||||
|
const Path & path, const Path & target, Roots & roots)
|
||||||
|
{
|
||||||
|
Path storePath = toStorePath(target);
|
||||||
|
if (store.isValidPath(storePath))
|
||||||
|
roots[path] = storePath;
|
||||||
|
else
|
||||||
|
printMsg(lvlInfo, format("skipping invalid root from `%1%' to `%2%'") % path % storePath);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void findRoots(StoreAPI & store, const Path & path, Roots & roots)
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
|
||||||
|
struct stat st = lstat(path);
|
||||||
|
|
||||||
|
if (S_ISDIR(st.st_mode)) {
|
||||||
|
Strings names = readDirectory(path);
|
||||||
|
foreach (Strings::iterator, i, names)
|
||||||
|
findRoots(store, path + "/" + *i, roots);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (S_ISLNK(st.st_mode)) {
|
||||||
|
Path target = readLink(path);
|
||||||
|
if (isInStore(target))
|
||||||
|
foundRoot(store, path, target, roots);
|
||||||
|
|
||||||
|
/* Handle indirect roots. */
|
||||||
|
else {
|
||||||
|
target = absPath(target, dirOf(path));
|
||||||
|
if (!pathExists(target)) {
|
||||||
|
if (isInDir(path, settings.nixStateDir + "/" + gcRootsDir + "/auto")) {
|
||||||
|
printMsg(lvlInfo, format("removing stale link from `%1%' to `%2%'") % path % target);
|
||||||
|
unlink(path.c_str());
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
struct stat st2 = lstat(target);
|
||||||
|
if (!S_ISLNK(st2.st_mode)) return;
|
||||||
|
Path target2 = readLink(target);
|
||||||
|
if (isInStore(target2)) foundRoot(store, target, target2, roots);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
catch (SysError & e) {
|
||||||
|
/* We only ignore permanent failures. */
|
||||||
|
if (e.errNo == EACCES || e.errNo == ENOENT || e.errNo == ENOTDIR)
|
||||||
|
printMsg(lvlInfo, format("cannot read potential root `%1%'") % path);
|
||||||
|
else
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Roots LocalStore::findRoots()
|
||||||
|
{
|
||||||
|
Roots roots;
|
||||||
|
|
||||||
|
/* Process direct roots in {gcroots,manifests,profiles}. */
|
||||||
|
nix::findRoots(*this, settings.nixStateDir + "/" + gcRootsDir, roots);
|
||||||
|
nix::findRoots(*this, settings.nixStateDir + "/manifests", roots);
|
||||||
|
nix::findRoots(*this, settings.nixStateDir + "/profiles", roots);
|
||||||
|
|
||||||
|
return roots;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void addAdditionalRoots(StoreAPI & store, PathSet & roots)
|
||||||
|
{
|
||||||
|
Path rootFinder = getEnv("NIX_ROOT_FINDER",
|
||||||
|
settings.nixLibexecDir + "/guix/list-runtime-roots");
|
||||||
|
|
||||||
|
if (rootFinder.empty()) return;
|
||||||
|
|
||||||
|
debug(format("executing `%1%' to find additional roots") % rootFinder);
|
||||||
|
|
||||||
|
string result = runProgram(rootFinder);
|
||||||
|
|
||||||
|
StringSet paths = tokenizeString<StringSet>(result, "\n");
|
||||||
|
|
||||||
|
foreach (StringSet::iterator, i, paths) {
|
||||||
|
if (isInStore(*i)) {
|
||||||
|
Path path = toStorePath(*i);
|
||||||
|
if (roots.find(path) == roots.end() && store.isValidPath(path)) {
|
||||||
|
debug(format("got additional root `%1%'") % path);
|
||||||
|
roots.insert(path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct GCLimitReached { };
|
||||||
|
|
||||||
|
|
||||||
|
struct LocalStore::GCState
|
||||||
|
{
|
||||||
|
GCOptions options;
|
||||||
|
GCResults & results;
|
||||||
|
PathSet roots;
|
||||||
|
PathSet tempRoots;
|
||||||
|
PathSet dead;
|
||||||
|
PathSet alive;
|
||||||
|
bool gcKeepOutputs;
|
||||||
|
bool gcKeepDerivations;
|
||||||
|
unsigned long long bytesInvalidated;
|
||||||
|
Path trashDir;
|
||||||
|
bool shouldDelete;
|
||||||
|
GCState(GCResults & results_) : results(results_), bytesInvalidated(0) { }
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
bool LocalStore::isActiveTempFile(const GCState & state,
|
||||||
|
const Path & path, const string & suffix)
|
||||||
|
{
|
||||||
|
return hasSuffix(path, suffix)
|
||||||
|
&& state.tempRoots.find(string(path, 0, path.size() - suffix.size())) != state.tempRoots.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::deleteGarbage(GCState & state, const Path & path)
|
||||||
|
{
|
||||||
|
unsigned long long bytesFreed;
|
||||||
|
deletePath(path, bytesFreed);
|
||||||
|
state.results.bytesFreed += bytesFreed;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::deletePathRecursive(GCState & state, const Path & path)
|
||||||
|
{
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
unsigned long long size = 0;
|
||||||
|
|
||||||
|
if (isValidPath(path)) {
|
||||||
|
PathSet referrers;
|
||||||
|
queryReferrers(path, referrers);
|
||||||
|
foreach (PathSet::iterator, i, referrers)
|
||||||
|
if (*i != path) deletePathRecursive(state, *i);
|
||||||
|
size = queryPathInfo(path).narSize;
|
||||||
|
invalidatePathChecked(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct stat st;
|
||||||
|
if (lstat(path.c_str(), &st)) {
|
||||||
|
if (errno == ENOENT) return;
|
||||||
|
throw SysError(format("getting status of %1%") % path);
|
||||||
|
}
|
||||||
|
|
||||||
|
printMsg(lvlInfo, format("deleting `%1%'") % path);
|
||||||
|
|
||||||
|
state.results.paths.insert(path);
|
||||||
|
|
||||||
|
/* If the path is not a regular file or symlink, move it to the
|
||||||
|
trash directory. The move is to ensure that later (when we're
|
||||||
|
not holding the global GC lock) we can delete the path without
|
||||||
|
being afraid that the path has become alive again. Otherwise
|
||||||
|
delete it right away. */
|
||||||
|
if (S_ISDIR(st.st_mode)) {
|
||||||
|
// Estimate the amount freed using the narSize field. FIXME:
|
||||||
|
// if the path was not valid, need to determine the actual
|
||||||
|
// size.
|
||||||
|
state.bytesInvalidated += size;
|
||||||
|
// Mac OS X cannot rename directories if they are read-only.
|
||||||
|
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
|
||||||
|
throw SysError(format("making `%1%' writable") % path);
|
||||||
|
Path tmp = state.trashDir + "/" + baseNameOf(path);
|
||||||
|
if (rename(path.c_str(), tmp.c_str()))
|
||||||
|
throw SysError(format("unable to rename `%1%' to `%2%'") % path % tmp);
|
||||||
|
} else
|
||||||
|
deleteGarbage(state, path);
|
||||||
|
|
||||||
|
if (state.results.bytesFreed + state.bytesInvalidated > state.options.maxFreed) {
|
||||||
|
printMsg(lvlInfo, format("deleted or invalidated more than %1% bytes; stopping") % state.options.maxFreed);
|
||||||
|
throw GCLimitReached();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool LocalStore::canReachRoot(GCState & state, PathSet & visited, const Path & path)
|
||||||
|
{
|
||||||
|
if (visited.find(path) != visited.end()) return false;
|
||||||
|
|
||||||
|
if (state.alive.find(path) != state.alive.end()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.dead.find(path) != state.dead.end()) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.roots.find(path) != state.roots.end()) {
|
||||||
|
printMsg(lvlDebug, format("cannot delete `%1%' because it's a root") % path);
|
||||||
|
state.alive.insert(path);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
visited.insert(path);
|
||||||
|
|
||||||
|
if (!isValidPath(path)) return false;
|
||||||
|
|
||||||
|
PathSet incoming;
|
||||||
|
|
||||||
|
/* Don't delete this path if any of its referrers are alive. */
|
||||||
|
queryReferrers(path, incoming);
|
||||||
|
|
||||||
|
/* If gc-keep-derivations is set and this is a derivation, then
|
||||||
|
don't delete the derivation if any of the outputs are alive. */
|
||||||
|
if (state.gcKeepDerivations && isDerivation(path)) {
|
||||||
|
PathSet outputs = queryDerivationOutputs(path);
|
||||||
|
foreach (PathSet::iterator, i, outputs)
|
||||||
|
if (isValidPath(*i) && queryDeriver(*i) == path)
|
||||||
|
incoming.insert(*i);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If gc-keep-outputs is set, then don't delete this path if there
|
||||||
|
are derivers of this path that are not garbage. */
|
||||||
|
if (state.gcKeepOutputs) {
|
||||||
|
PathSet derivers = queryValidDerivers(path);
|
||||||
|
foreach (PathSet::iterator, i, derivers)
|
||||||
|
incoming.insert(*i);
|
||||||
|
}
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, incoming)
|
||||||
|
if (*i != path)
|
||||||
|
if (canReachRoot(state, visited, *i)) {
|
||||||
|
state.alive.insert(path);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
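/* Worked example, illustrative only: suppose /nix/store/<h1>-hello is
   reachable from a root and was built by /nix/store/<h2>-hello.drv.  With
   gc-keep-derivations enabled, the .drv's incoming set is extended with its
   valid outputs, so the live output keeps the .drv alive; with gc-keep-outputs
   enabled, an output's incoming set is extended with its valid derivers, so a
   live .drv keeps the output alive.  With both enabled, deleting either one
   requires both to be garbage. */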
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::tryToDelete(GCState & state, const Path & path)
|
||||||
|
{
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
if (path == linksDir || path == state.trashDir) return;
|
||||||
|
|
||||||
|
startNest(nest, lvlDebug, format("considering whether to delete `%1%'") % path);
|
||||||
|
|
||||||
|
if (!isValidPath(path)) {
|
||||||
|
/* A lock file belonging to a path that we're building right
|
||||||
|
now isn't garbage. */
|
||||||
|
if (isActiveTempFile(state, path, ".lock")) return;
|
||||||
|
|
||||||
|
/* Don't delete .chroot directories for derivations that are
|
||||||
|
currently being built. */
|
||||||
|
if (isActiveTempFile(state, path, ".chroot")) return;
|
||||||
|
}
|
||||||
|
|
||||||
|
PathSet visited;
|
||||||
|
|
||||||
|
if (canReachRoot(state, visited, path)) {
|
||||||
|
printMsg(lvlDebug, format("cannot delete `%1%' because it's still reachable") % path);
|
||||||
|
} else {
|
||||||
|
/* No path we visited was a root, so everything is garbage.
|
||||||
|
But we only delete ‘path’ and its referrers here so that
|
||||||
|
‘nix-store --delete’ doesn't have the unexpected effect of
|
||||||
|
recursing into derivations and outputs. */
|
||||||
|
state.dead.insert(visited.begin(), visited.end());
|
||||||
|
if (state.shouldDelete)
|
||||||
|
deletePathRecursive(state, path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Unlink all files in /nix/store/.links that have a link count of 1,
|
||||||
|
which indicates that there are no other links and so they can be
|
||||||
|
safely deleted. FIXME: race condition with optimisePath(): we
|
||||||
|
might see a link count of 1 just before optimisePath() increases
|
||||||
|
the link count. */
|
||||||
|
void LocalStore::removeUnusedLinks(const GCState & state)
|
||||||
|
{
|
||||||
|
AutoCloseDir dir = opendir(linksDir.c_str());
|
||||||
|
if (!dir) throw SysError(format("opening directory `%1%'") % linksDir);
|
||||||
|
|
||||||
|
long long actualSize = 0, unsharedSize = 0;
|
||||||
|
|
||||||
|
struct dirent * dirent;
|
||||||
|
while (errno = 0, dirent = readdir(dir)) {
|
||||||
|
checkInterrupt();
|
||||||
|
string name = dirent->d_name;
|
||||||
|
if (name == "." || name == "..") continue;
|
||||||
|
Path path = linksDir + "/" + name;
|
||||||
|
|
||||||
|
struct stat st;
|
||||||
|
if (lstat(path.c_str(), &st) == -1)
|
||||||
|
throw SysError(format("statting `%1%'") % path);
|
||||||
|
|
||||||
|
if (st.st_nlink != 1) {
|
||||||
|
unsigned long long size = st.st_blocks * 512ULL;
|
||||||
|
actualSize += size;
|
||||||
|
unsharedSize += (st.st_nlink - 1) * size;
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
printMsg(lvlTalkative, format("deleting unused link `%1%'") % path);
|
||||||
|
|
||||||
|
if (unlink(path.c_str()) == -1)
|
||||||
|
throw SysError(format("deleting `%1%'") % path);
|
||||||
|
|
||||||
|
state.results.bytesFreed += st.st_blocks * 512;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct stat st;
|
||||||
|
if (stat(linksDir.c_str(), &st) == -1)
|
||||||
|
throw SysError(format("statting `%1%'") % linksDir);
|
||||||
|
long long overhead = st.st_blocks * 512ULL;
|
||||||
|
|
||||||
|
printMsg(lvlInfo, format("note: currently hard linking saves %.2f MiB")
|
||||||
|
% ((unsharedSize - actualSize - overhead) / (1024.0 * 1024.0)));
|
||||||
|
}
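/* Worked example of the statistic printed above (numbers invented): a link
   with st_nlink == 3 and st_blocks * 512 == 4 MiB contributes
   actualSize += 4 MiB and unsharedSize += (3 - 1) * 4 MiB = 8 MiB, so hard
   linking is credited with saving 8 - 4 = 4 MiB, minus the space occupied by
   the .links directory itself (`overhead'). */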
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||||
|
{
|
||||||
|
GCState state(results);
|
||||||
|
state.options = options;
|
||||||
|
state.trashDir = settings.nixStore + "/trash";
|
||||||
|
state.gcKeepOutputs = settings.gcKeepOutputs;
|
||||||
|
state.gcKeepDerivations = settings.gcKeepDerivations;
|
||||||
|
|
||||||
|
/* Using `--ignore-liveness' with `--delete' can have unintended
|
||||||
|
consequences if `gc-keep-outputs' or `gc-keep-derivations' are
|
||||||
|
true (the garbage collector will recurse into deleting the
|
||||||
|
outputs or derivers, respectively). So disable them. */
|
||||||
|
if (options.action == GCOptions::gcDeleteSpecific && options.ignoreLiveness) {
|
||||||
|
state.gcKeepOutputs = false;
|
||||||
|
state.gcKeepDerivations = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
state.shouldDelete = options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific;
|
||||||
|
|
||||||
|
/* Acquire the global GC root. This prevents
|
||||||
|
a) New roots from being added.
|
||||||
|
b) Processes from creating new temporary root files. */
|
||||||
|
AutoCloseFD fdGCLock = openGCLock(ltWrite);
|
||||||
|
|
||||||
|
/* Find the roots. Since we've grabbed the GC lock, the set of
|
||||||
|
permanent roots cannot increase now. */
|
||||||
|
printMsg(lvlError, format("finding garbage collector roots..."));
|
||||||
|
Roots rootMap = options.ignoreLiveness ? Roots() : findRoots();
|
||||||
|
|
||||||
|
foreach (Roots::iterator, i, rootMap) state.roots.insert(i->second);
|
||||||
|
|
||||||
|
/* Add additional roots returned by the program specified by the
|
||||||
|
NIX_ROOT_FINDER environment variable. This is typically used
|
||||||
|
to add running programs to the set of roots (to prevent them
|
||||||
|
from being garbage collected). */
|
||||||
|
if (!options.ignoreLiveness)
|
||||||
|
addAdditionalRoots(*this, state.roots);
|
||||||
|
|
||||||
|
/* Read the temporary roots. This acquires read locks on all
|
||||||
|
per-process temporary root files. So after this point no paths
|
||||||
|
can be added to the set of temporary roots. */
|
||||||
|
FDs fds;
|
||||||
|
readTempRoots(state.tempRoots, fds);
|
||||||
|
state.roots.insert(state.tempRoots.begin(), state.tempRoots.end());
|
||||||
|
|
||||||
|
/* After this point the set of roots or temporary roots cannot
|
||||||
|
increase, since we hold locks on everything. So everything
|
||||||
|
that is not reachable from `roots'. */
|
||||||
|
|
||||||
|
if (state.shouldDelete) {
|
||||||
|
if (pathExists(state.trashDir)) deleteGarbage(state, state.trashDir);
|
||||||
|
createDirs(state.trashDir);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Now either delete all garbage paths, or just the specified
|
||||||
|
paths (for gcDeleteSpecific). */
|
||||||
|
|
||||||
|
if (options.action == GCOptions::gcDeleteSpecific) {
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, options.pathsToDelete) {
|
||||||
|
assertStorePath(*i);
|
||||||
|
tryToDelete(state, *i);
|
||||||
|
if (state.dead.find(*i) == state.dead.end())
|
||||||
|
throw Error(format("cannot delete path `%1%' since it is still alive") % *i);
|
||||||
|
}
|
||||||
|
|
||||||
|
} else if (options.maxFreed > 0) {
|
||||||
|
|
||||||
|
if (state.shouldDelete)
|
||||||
|
printMsg(lvlError, format("deleting garbage..."));
|
||||||
|
else
|
||||||
|
printMsg(lvlError, format("determining live/dead paths..."));
|
||||||
|
|
||||||
|
try {
|
||||||
|
|
||||||
|
AutoCloseDir dir = opendir(settings.nixStore.c_str());
|
||||||
|
if (!dir) throw SysError(format("opening directory `%1%'") % settings.nixStore);
|
||||||
|
|
||||||
|
/* Read the store and immediately delete all paths that
|
||||||
|
aren't valid. When using --max-freed etc., deleting
|
||||||
|
invalid paths is preferred over deleting unreachable
|
||||||
|
paths, since unreachable paths could become reachable
|
||||||
|
again. We don't use readDirectory() here so that GCing
|
||||||
|
can start faster. */
|
||||||
|
Paths entries;
|
||||||
|
struct dirent * dirent;
|
||||||
|
while (errno = 0, dirent = readdir(dir)) {
|
||||||
|
checkInterrupt();
|
||||||
|
string name = dirent->d_name;
|
||||||
|
if (name == "." || name == "..") continue;
|
||||||
|
Path path = settings.nixStore + "/" + name;
|
||||||
|
if (isValidPath(path))
|
||||||
|
entries.push_back(path);
|
||||||
|
else
|
||||||
|
tryToDelete(state, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
dir.close();
|
||||||
|
|
||||||
|
/* Now delete the unreachable valid paths. Randomise the
|
||||||
|
order in which we delete entries to make the collector
|
||||||
|
less biased towards deleting paths that come
|
||||||
|
alphabetically first (e.g. /nix/store/000...). This
|
||||||
|
matters when using --max-freed etc. */
|
||||||
|
vector<Path> entries_(entries.begin(), entries.end());
|
||||||
|
random_shuffle(entries_.begin(), entries_.end());
|
||||||
|
|
||||||
|
foreach (vector<Path>::iterator, i, entries_)
|
||||||
|
tryToDelete(state, *i);
|
||||||
|
|
||||||
|
} catch (GCLimitReached & e) {
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.options.action == GCOptions::gcReturnLive) {
|
||||||
|
state.results.paths = state.alive;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (state.options.action == GCOptions::gcReturnDead) {
|
||||||
|
state.results.paths = state.dead;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Allow other processes to add to the store from here on. */
|
||||||
|
fdGCLock.close();
|
||||||
|
fds.clear();
|
||||||
|
|
||||||
|
/* Delete the trash directory. */
|
||||||
|
printMsg(lvlInfo, format("deleting `%1%'") % state.trashDir);
|
||||||
|
deleteGarbage(state, state.trashDir);
|
||||||
|
|
||||||
|
/* Clean up the links directory. */
|
||||||
|
if (options.action == GCOptions::gcDeleteDead || options.action == GCOptions::gcDeleteSpecific) {
|
||||||
|
printMsg(lvlError, format("deleting unused links..."));
|
||||||
|
removeUnusedLinks(state);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* While we're at it, vacuum the database. */
|
||||||
|
if (options.action == GCOptions::gcDeleteDead) vacuumDB();
|
||||||
|
}
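/* Sketch of driving the collector above; obtaining the LocalStore is elided
   and the GCOptions/GCResults fields used are the ones referenced in this
   file.  Not part of the imported sources. */
static void exampleDeleteDead(LocalStore & store)
{
    GCOptions options;
    options.action = GCOptions::gcDeleteDead;   /* delete everything unreachable */
    options.maxFreed = 1ULL << 30;              /* stop after roughly 1 GiB freed */

    GCResults results;
    store.collectGarbage(options, results);
    /* results.paths now lists the deleted store paths and
       results.bytesFreed the total amount reclaimed. */
}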
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,240 @@
#include "config.h"

#include "globals.hh"
#include "util.hh"

#include <map>
#include <algorithm>


namespace nix {


/* The default location of the daemon socket, relative to nixStateDir.
   The socket is in a directory to allow you to control access to the
   Nix daemon by setting the mode/ownership of the directory
   appropriately.  (This wouldn't work on the socket itself since it
   must be deleted and recreated on startup.) */
#define DEFAULT_SOCKET_PATH "/daemon-socket/socket"


Settings settings;


Settings::Settings()
{
    keepFailed = false;
    keepGoing = false;
    tryFallback = false;
    buildVerbosity = lvlError;
    maxBuildJobs = 1;
    buildCores = 1;
    readOnlyMode = false;
    thisSystem = SYSTEM;
    maxSilentTime = 0;
    buildTimeout = 0;
    useBuildHook = true;
    printBuildTrace = false;
    reservedSize = 1024 * 1024;
    fsyncMetadata = true;
    useSQLiteWAL = true;
    syncBeforeRegistering = false;
    useSubstitutes = true;
    useChroot = false;
    useSshSubstituter = false;
    impersonateLinux26 = false;
    keepLog = true;
    compressLog = true;
    maxLogSize = 0;
    cacheFailure = false;
    pollInterval = 5;
    checkRootReachability = false;
    gcKeepOutputs = false;
    gcKeepDerivations = true;
    autoOptimiseStore = false;
    envKeepDerivations = false;
    lockCPU = getEnv("NIX_AFFINITY_HACK", "1") == "1";
    showTrace = false;
}


void Settings::processEnvironment()
{
    nixStore = canonPath(getEnv("NIX_STORE_DIR", getEnv("NIX_STORE", NIX_STORE_DIR)));
    nixDataDir = canonPath(getEnv("NIX_DATA_DIR", NIX_DATA_DIR));
    nixLogDir = canonPath(getEnv("NIX_LOG_DIR", NIX_LOG_DIR));
    nixStateDir = canonPath(getEnv("NIX_STATE_DIR", NIX_STATE_DIR));
    nixDBPath = getEnv("NIX_DB_DIR", nixStateDir + "/db");
    nixConfDir = canonPath(getEnv("NIX_CONF_DIR", NIX_CONF_DIR));
    nixLibexecDir = canonPath(getEnv("NIX_LIBEXEC_DIR", NIX_LIBEXEC_DIR));
    nixBinDir = canonPath(getEnv("NIX_BIN_DIR", NIX_BIN_DIR));
    nixDaemonSocketFile = canonPath(nixStateDir + DEFAULT_SOCKET_PATH);
}


void Settings::loadConfFile()
{
    Path settingsFile = (format("%1%/%2%") % nixConfDir % "nix.conf").str();
    if (!pathExists(settingsFile)) return;
    string contents = readFile(settingsFile);

    unsigned int pos = 0;

    while (pos < contents.size()) {
        string line;
        while (pos < contents.size() && contents[pos] != '\n')
            line += contents[pos++];
        pos++;

        string::size_type hash = line.find('#');
        if (hash != string::npos)
            line = string(line, 0, hash);

        vector<string> tokens = tokenizeString<vector<string> >(line);
        if (tokens.empty()) continue;

        if (tokens.size() < 2 || tokens[1] != "=")
            throw Error(format("illegal configuration line `%1%' in `%2%'") % line % settingsFile);

        string name = tokens[0];

        vector<string>::iterator i = tokens.begin();
        advance(i, 2);
        settings[name] = concatStringsSep(" ", Strings(i, tokens.end())); // FIXME: slow
    };
}


void Settings::set(const string & name, const string & value)
{
    settings[name] = value;
    overrides[name] = value;
}


void Settings::update()
{
    get(tryFallback, "build-fallback");
    get(maxBuildJobs, "build-max-jobs");
    get(buildCores, "build-cores");
    get(thisSystem, "system");
    get(maxSilentTime, "build-max-silent-time");
    get(buildTimeout, "build-timeout");
    get(reservedSize, "gc-reserved-space");
    get(fsyncMetadata, "fsync-metadata");
    get(useSQLiteWAL, "use-sqlite-wal");
    get(syncBeforeRegistering, "sync-before-registering");
    get(useSubstitutes, "build-use-substitutes");
    get(buildUsersGroup, "build-users-group");
    get(useChroot, "build-use-chroot");
    get(dirsInChroot, "build-chroot-dirs");
    get(impersonateLinux26, "build-impersonate-linux-26");
    get(keepLog, "build-keep-log");
    get(compressLog, "build-compress-log");
    get(maxLogSize, "build-max-log-size");
    get(cacheFailure, "build-cache-failure");
    get(pollInterval, "build-poll-interval");
    get(checkRootReachability, "gc-check-reachability");
    get(gcKeepOutputs, "gc-keep-outputs");
    get(gcKeepDerivations, "gc-keep-derivations");
    get(autoOptimiseStore, "auto-optimise-store");
    get(envKeepDerivations, "env-keep-derivations");
    get(sshSubstituterHosts, "ssh-substituter-hosts");
    get(useSshSubstituter, "use-ssh-substituter");

    string subs = getEnv("NIX_SUBSTITUTERS", "default");
    if (subs == "default") {
        substituters.clear();
#if 0
        if (getEnv("NIX_OTHER_STORES") != "")
            substituters.push_back(nixLibexecDir + "/nix/substituters/copy-from-other-stores.pl");
#endif
        substituters.push_back(nixLibexecDir + "/nix/substituters/download-using-manifests.pl");
        substituters.push_back(nixLibexecDir + "/nix/substituters/download-from-binary-cache.pl");
        if (useSshSubstituter)
            substituters.push_back(nixLibexecDir + "/nix/substituters/download-via-ssh");
    } else
        substituters = tokenizeString<Strings>(subs, ":");
}


void Settings::get(string & res, const string & name)
{
    SettingsMap::iterator i = settings.find(name);
    if (i == settings.end()) return;
    res = i->second;
}


void Settings::get(bool & res, const string & name)
{
    SettingsMap::iterator i = settings.find(name);
    if (i == settings.end()) return;
    if (i->second == "true") res = true;
    else if (i->second == "false") res = false;
    else throw Error(format("configuration option `%1%' should be either `true' or `false', not `%2%'")
        % name % i->second);
}


void Settings::get(StringSet & res, const string & name)
{
    SettingsMap::iterator i = settings.find(name);
    if (i == settings.end()) return;
    res.clear();
    Strings ss = tokenizeString<Strings>(i->second);
    res.insert(ss.begin(), ss.end());
}

void Settings::get(Strings & res, const string & name)
{
    SettingsMap::iterator i = settings.find(name);
    if (i == settings.end()) return;
    res = tokenizeString<Strings>(i->second);
}


template<class N> void Settings::get(N & res, const string & name)
{
    SettingsMap::iterator i = settings.find(name);
    if (i == settings.end()) return;
    if (!string2Int(i->second, res))
        throw Error(format("configuration setting `%1%' should have an integer value") % name);
}


string Settings::pack()
{
    string s;
    foreach (SettingsMap::iterator, i, settings) {
        if (i->first.find('\n') != string::npos ||
            i->first.find('=') != string::npos ||
            i->second.find('\n') != string::npos)
            throw Error("illegal option name/value");
        s += i->first; s += '='; s += i->second; s += '\n';
    }
    return s;
}


void Settings::unpack(const string & pack) {
    Strings lines = tokenizeString<Strings>(pack, "\n");
    foreach (Strings::iterator, i, lines) {
        string::size_type eq = i->find('=');
        if (eq == string::npos)
            throw Error("illegal option name/value");
        set(i->substr(0, eq), i->substr(eq + 1));
    }
}


Settings::SettingsMap Settings::getOverrides()
{
    return overrides;
}


const string nixVersion = PACKAGE_VERSION;


}

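loadConfFile() above accepts `name = value' lines, with whitespace required around the `=' (the line is split on whitespace) and `#' starting a comment. A small illustrative nix.conf that these settings would parse; the values are examples only, not recommendations:

    # comments run to the end of the line
    build-max-jobs = 4
    build-cores = 0                 # 0 means auto-detect
    gc-keep-outputs = true
    build-chroot-dirs = /dev /proc /bin/sh

The last line is read through get(dirsInChroot, "build-chroot-dirs") and ends up as a three-element StringSet.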
@ -0,0 +1,218 @@
#pragma once

#include "types.hh"

#include <map>
#include <sys/types.h>


namespace nix {


struct Settings {

    typedef std::map<string, string> SettingsMap;

    Settings();

    void processEnvironment();

    void loadConfFile();

    void set(const string & name, const string & value);

    void update();

    string pack();

    void unpack(const string & pack);

    SettingsMap getOverrides();

    /* The directory where we store sources and derived files. */
    Path nixStore;

    Path nixDataDir; /* !!! fix */

    /* The directory where we log various operations. */
    Path nixLogDir;

    /* The directory where state is stored. */
    Path nixStateDir;

    /* The directory where we keep the SQLite database. */
    Path nixDBPath;

    /* The directory where configuration files are stored. */
    Path nixConfDir;

    /* The directory where internal helper programs are stored. */
    Path nixLibexecDir;

    /* The directory where the main programs are stored. */
    Path nixBinDir;

    /* File name of the socket the daemon listens to. */
    Path nixDaemonSocketFile;

    /* Whether to keep temporary directories of failed builds. */
    bool keepFailed;

    /* Whether to keep building subgoals when a sibling (another
       subgoal of the same goal) fails. */
    bool keepGoing;

    /* Whether, if we cannot realise the known closure corresponding
       to a derivation, we should try to normalise the derivation
       instead. */
    bool tryFallback;

    /* Verbosity level for build output. */
    Verbosity buildVerbosity;

    /* Maximum number of parallel build jobs.  0 means unlimited. */
    unsigned int maxBuildJobs;

    /* Number of CPU cores to utilize in parallel within a build,
       i.e. by passing this number to Make via '-j'.  0 means that the
       number of actual CPU cores on the local host ought to be
       auto-detected. */
    unsigned int buildCores;

    /* Read-only mode.  Don't copy stuff to the store, don't change
       the database. */
    bool readOnlyMode;

    /* The canonical system name, as returned by config.guess. */
    string thisSystem;

    /* The maximum time in seconds that a builder can go without
       producing any output on stdout/stderr before it is killed.  0
       means infinity. */
    time_t maxSilentTime;

    /* The maximum duration in seconds that a builder can run.  0
       means infinity. */
    time_t buildTimeout;

    /* The substituters.  There are programs that can somehow realise
       a store path without building, e.g., by downloading it or
       copying it from a CD. */
    Paths substituters;

    /* Whether to use build hooks (for distributed builds).  Sometimes
       users want to disable this from the command-line. */
    bool useBuildHook;

    /* Whether buildDerivations() should print out lines on stderr in
       a fixed format to allow its progress to be monitored.  Each
       line starts with a "@".  The following are defined:

       @ build-started <drvpath> <outpath> <system> <logfile>
       @ build-failed <drvpath> <outpath> <exitcode> <error text>
       @ build-succeeded <drvpath> <outpath>
       @ substituter-started <outpath> <substituter>
       @ substituter-failed <outpath> <exitcode> <error text>
       @ substituter-succeeded <outpath>

       Best combined with --no-build-output, otherwise stderr might
       conceivably contain lines in this format printed by the
       builders. */
    bool printBuildTrace;

    /* Amount of reserved space for the garbage collector
       (/nix/var/nix/db/reserved). */
    off_t reservedSize;

    /* Whether SQLite should use fsync. */
    bool fsyncMetadata;

    /* Whether SQLite should use WAL mode. */
    bool useSQLiteWAL;

    /* Whether to call sync() before registering a path as valid. */
    bool syncBeforeRegistering;

    /* Whether to use substitutes. */
    bool useSubstitutes;

    /* The Unix group that contains the build users. */
    string buildUsersGroup;

    /* Whether to build in chroot. */
    bool useChroot;

    /* The directories from the host filesystem to be included in the
       chroot. */
    StringSet dirsInChroot;

    /* Set of ssh connection strings for the ssh substituter. */
    Strings sshSubstituterHosts;

    /* Whether to use the ssh substituter at all. */
    bool useSshSubstituter;

    /* Whether to impersonate a Linux 2.6 machine on newer kernels. */
    bool impersonateLinux26;

    /* Whether to store build logs. */
    bool keepLog;

    /* Whether to compress logs. */
    bool compressLog;

    /* Maximum number of bytes a builder can write to stdout/stderr
       before being killed (0 means no limit). */
    unsigned long maxLogSize;

    /* Whether to cache build failures. */
    bool cacheFailure;

    /* How often (in seconds) to poll for locks. */
    unsigned int pollInterval;

    /* Whether to check if new GC roots can in fact be found by the
|
||||||
|
garbage collector. */
|
||||||
|
bool checkRootReachability;
|
||||||
|
|
||||||
|
/* Whether the garbage collector should keep outputs of live
|
||||||
|
derivations. */
|
||||||
|
bool gcKeepOutputs;
|
||||||
|
|
||||||
|
/* Whether the garbage collector should keep derivers of live
|
||||||
|
paths. */
|
||||||
|
bool gcKeepDerivations;
|
||||||
|
|
||||||
|
/* Whether to automatically replace files with identical contents
|
||||||
|
with hard links. */
|
||||||
|
bool autoOptimiseStore;
|
||||||
|
|
||||||
|
/* Whether to add derivations as a dependency of user environments
|
||||||
|
(to prevent them from being GCed). */
|
||||||
|
bool envKeepDerivations;
|
||||||
|
|
||||||
|
/* Whether to lock the Nix client and worker to the same CPU. */
|
||||||
|
bool lockCPU;
|
||||||
|
|
||||||
|
/* Whether to show a stack trace if Nix evaluation fails. */
|
||||||
|
bool showTrace;
|
||||||
|
|
||||||
|
private:
|
||||||
|
SettingsMap settings, overrides;
|
||||||
|
|
||||||
|
void get(string & res, const string & name);
|
||||||
|
void get(bool & res, const string & name);
|
||||||
|
void get(StringSet & res, const string & name);
|
||||||
|
void get(Strings & res, const string & name);
|
||||||
|
template<class N> void get(N & res, const string & name);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
// FIXME: don't use a global variable.
|
||||||
|
extern Settings settings;
|
||||||
|
|
||||||
|
|
||||||
|
extern const string nixVersion;
|
||||||
|
|
||||||
|
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,333 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "store-api.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
#include "pathlocks.hh"
|
||||||
|
|
||||||
|
|
||||||
|
class sqlite3;
|
||||||
|
class sqlite3_stmt;
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
/* Nix store and database schema version. Version 1 (or 0) was Nix <=
|
||||||
|
0.7. Version 2 was Nix 0.8 and 0.9. Version 3 is Nix 0.10.
|
||||||
|
Version 4 is Nix 0.11. Version 5 is Nix 0.12-0.16. Version 6 is
|
||||||
|
Nix 1.0. Version 7 is Nix 1.3. */
|
||||||
|
const int nixSchemaVersion = 7;
|
||||||
|
|
||||||
|
|
||||||
|
extern string drvsLogDir;
|
||||||
|
|
||||||
|
|
||||||
|
struct Derivation;
|
||||||
|
|
||||||
|
|
||||||
|
struct OptimiseStats
|
||||||
|
{
|
||||||
|
unsigned long totalFiles;
|
||||||
|
unsigned long sameContents;
|
||||||
|
unsigned long filesLinked;
|
||||||
|
unsigned long long bytesFreed;
|
||||||
|
unsigned long long blocksFreed;
|
||||||
|
OptimiseStats()
|
||||||
|
{
|
||||||
|
totalFiles = sameContents = filesLinked = 0;
|
||||||
|
bytesFreed = blocksFreed = 0;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
struct RunningSubstituter
|
||||||
|
{
|
||||||
|
Path program;
|
||||||
|
Pid pid;
|
||||||
|
AutoCloseFD to, from, error;
|
||||||
|
FdSource fromBuf;
|
||||||
|
bool disabled;
|
||||||
|
RunningSubstituter() : disabled(false) { };
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Wrapper object to close the SQLite database automatically. */
|
||||||
|
struct SQLite
|
||||||
|
{
|
||||||
|
sqlite3 * db;
|
||||||
|
SQLite() { db = 0; }
|
||||||
|
~SQLite();
|
||||||
|
operator sqlite3 * () { return db; }
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Wrapper object to create and destroy SQLite prepared statements. */
|
||||||
|
struct SQLiteStmt
|
||||||
|
{
|
||||||
|
sqlite3 * db;
|
||||||
|
sqlite3_stmt * stmt;
|
||||||
|
unsigned int curArg;
|
||||||
|
SQLiteStmt() { stmt = 0; }
|
||||||
|
void create(sqlite3 * db, const string & s);
|
||||||
|
void reset();
|
||||||
|
~SQLiteStmt();
|
||||||
|
operator sqlite3_stmt * () { return stmt; }
|
||||||
|
void bind(const string & value);
|
||||||
|
void bind(int value);
|
||||||
|
void bind64(long long value);
|
||||||
|
void bind();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class LocalStore : public StoreAPI
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
typedef std::map<Path, RunningSubstituter> RunningSubstituters;
|
||||||
|
RunningSubstituters runningSubstituters;
|
||||||
|
|
||||||
|
Path linksDir;
|
||||||
|
|
||||||
|
public:
|
||||||
|
|
||||||
|
/* Initialise the local store, upgrading the schema if
|
||||||
|
necessary. */
|
||||||
|
LocalStore(bool reserveSpace = true);
|
||||||
|
|
||||||
|
~LocalStore();
|
||||||
|
|
||||||
|
/* Implementations of abstract store API methods. */
|
||||||
|
|
||||||
|
bool isValidPath(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryValidPaths(const PathSet & paths);
|
||||||
|
|
||||||
|
PathSet queryAllValidPaths();
|
||||||
|
|
||||||
|
ValidPathInfo queryPathInfo(const Path & path);
|
||||||
|
|
||||||
|
Hash queryPathHash(const Path & path);
|
||||||
|
|
||||||
|
void queryReferences(const Path & path, PathSet & references);
|
||||||
|
|
||||||
|
void queryReferrers(const Path & path, PathSet & referrers);
|
||||||
|
|
||||||
|
Path queryDeriver(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryValidDerivers(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryDerivationOutputs(const Path & path);
|
||||||
|
|
||||||
|
StringSet queryDerivationOutputNames(const Path & path);
|
||||||
|
|
||||||
|
Path queryPathFromHashPart(const string & hashPart);
|
||||||
|
|
||||||
|
PathSet querySubstitutablePaths(const PathSet & paths);
|
||||||
|
|
||||||
|
void querySubstitutablePathInfos(const Path & substituter,
|
||||||
|
PathSet & paths, SubstitutablePathInfos & infos);
|
||||||
|
|
||||||
|
void querySubstitutablePathInfos(const PathSet & paths,
|
||||||
|
SubstitutablePathInfos & infos);
|
||||||
|
|
||||||
|
Path addToStore(const Path & srcPath,
|
||||||
|
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||||
|
PathFilter & filter = defaultPathFilter, bool repair = false);
|
||||||
|
|
||||||
|
/* Like addToStore(), but the contents of the path are contained
|
||||||
|
in `dump', which is either a NAR serialisation (if recursive ==
|
||||||
|
true) or simply the contents of a regular file (if recursive ==
|
||||||
|
false). */
|
||||||
|
Path addToStoreFromDump(const string & dump, const string & name,
|
||||||
|
bool recursive = true, HashType hashAlgo = htSHA256, bool repair = false);
|
||||||
|
|
||||||
|
Path addTextToStore(const string & name, const string & s,
|
||||||
|
const PathSet & references, bool repair = false);
|
||||||
|
|
||||||
|
void exportPath(const Path & path, bool sign,
|
||||||
|
Sink & sink);
|
||||||
|
|
||||||
|
Paths importPaths(bool requireSignature, Source & source);
|
||||||
|
|
||||||
|
void buildPaths(const PathSet & paths, BuildMode buildMode);
|
||||||
|
|
||||||
|
void ensurePath(const Path & path);
|
||||||
|
|
||||||
|
void addTempRoot(const Path & path);
|
||||||
|
|
||||||
|
void addIndirectRoot(const Path & path);
|
||||||
|
|
||||||
|
void syncWithGC();
|
||||||
|
|
||||||
|
Roots findRoots();
|
||||||
|
|
||||||
|
void collectGarbage(const GCOptions & options, GCResults & results);
|
||||||
|
|
||||||
|
/* Optimise the disk space usage of the Nix store by hard-linking
|
||||||
|
files with the same contents. */
|
||||||
|
void optimiseStore(OptimiseStats & stats);
|
||||||
|
|
||||||
|
/* Optimise a single store path. */
|
||||||
|
void optimisePath(const Path & path);
|
||||||
|
|
||||||
|
/* Check the integrity of the Nix store. Returns true if errors
|
||||||
|
remain. */
|
||||||
|
bool verifyStore(bool checkContents, bool repair);
|
||||||
|
|
||||||
|
/* Register the validity of a path, i.e., that `path' exists, that
|
||||||
|
the paths referenced by it exists, and in the case of an output
|
||||||
|
path of a derivation, that it has been produced by a successful
|
||||||
|
execution of the derivation (or something equivalent). Also
|
||||||
|
register the hash of the file system contents of the path. The
|
||||||
|
hash must be a SHA-256 hash. */
|
||||||
|
void registerValidPath(const ValidPathInfo & info);
|
||||||
|
|
||||||
|
void registerValidPaths(const ValidPathInfos & infos);
|
||||||
|
|
||||||
|
/* Register that the build of a derivation with output `path' has
|
||||||
|
failed. */
|
||||||
|
void registerFailedPath(const Path & path);
|
||||||
|
|
||||||
|
/* Query whether `path' previously failed to build. */
|
||||||
|
bool hasPathFailed(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryFailedPaths();
|
||||||
|
|
||||||
|
void clearFailedPaths(const PathSet & paths);
|
||||||
|
|
||||||
|
void vacuumDB();
|
||||||
|
|
||||||
|
/* Repair the contents of the given path by redownloading it using
|
||||||
|
a substituter (if available). */
|
||||||
|
void repairPath(const Path & path);
|
||||||
|
|
||||||
|
/* Check whether the given valid path exists and has the right
|
||||||
|
contents. */
|
||||||
|
bool pathContentsGood(const Path & path);
|
||||||
|
|
||||||
|
void markContentsGood(const Path & path);
|
||||||
|
|
||||||
|
void setSubstituterEnv();
|
||||||
|
|
||||||
|
private:
|
||||||
|
|
||||||
|
Path schemaPath;
|
||||||
|
|
||||||
|
/* Lock file used for upgrading. */
|
||||||
|
AutoCloseFD globalLock;
|
||||||
|
|
||||||
|
/* The SQLite database object. */
|
||||||
|
SQLite db;
|
||||||
|
|
||||||
|
/* Some precompiled SQLite statements. */
|
||||||
|
SQLiteStmt stmtRegisterValidPath;
|
||||||
|
SQLiteStmt stmtUpdatePathInfo;
|
||||||
|
SQLiteStmt stmtAddReference;
|
||||||
|
SQLiteStmt stmtQueryPathInfo;
|
||||||
|
SQLiteStmt stmtQueryReferences;
|
||||||
|
SQLiteStmt stmtQueryReferrers;
|
||||||
|
SQLiteStmt stmtInvalidatePath;
|
||||||
|
SQLiteStmt stmtRegisterFailedPath;
|
||||||
|
SQLiteStmt stmtHasPathFailed;
|
||||||
|
SQLiteStmt stmtQueryFailedPaths;
|
||||||
|
SQLiteStmt stmtClearFailedPath;
|
||||||
|
SQLiteStmt stmtAddDerivationOutput;
|
||||||
|
SQLiteStmt stmtQueryValidDerivers;
|
||||||
|
SQLiteStmt stmtQueryDerivationOutputs;
|
||||||
|
SQLiteStmt stmtQueryPathFromHashPart;
|
||||||
|
|
||||||
|
/* Cache for pathContentsGood(). */
|
||||||
|
std::map<Path, bool> pathContentsGoodCache;
|
||||||
|
|
||||||
|
bool didSetSubstituterEnv;
|
||||||
|
|
||||||
|
int getSchema();
|
||||||
|
|
||||||
|
void openDB(bool create);
|
||||||
|
|
||||||
|
void makeStoreWritable();
|
||||||
|
|
||||||
|
unsigned long long queryValidPathId(const Path & path);
|
||||||
|
|
||||||
|
unsigned long long addValidPath(const ValidPathInfo & info, bool checkOutputs = true);
|
||||||
|
|
||||||
|
void addReference(unsigned long long referrer, unsigned long long reference);
|
||||||
|
|
||||||
|
void appendReferrer(const Path & from, const Path & to, bool lock);
|
||||||
|
|
||||||
|
void rewriteReferrers(const Path & path, bool purge, PathSet referrers);
|
||||||
|
|
||||||
|
void invalidatePath(const Path & path);
|
||||||
|
|
||||||
|
/* Delete a path from the Nix store. */
|
||||||
|
void invalidatePathChecked(const Path & path);
|
||||||
|
|
||||||
|
void verifyPath(const Path & path, const PathSet & store,
|
||||||
|
PathSet & done, PathSet & validPaths, bool repair, bool & errors);
|
||||||
|
|
||||||
|
void updatePathInfo(const ValidPathInfo & info);
|
||||||
|
|
||||||
|
void upgradeStore6();
|
||||||
|
void upgradeStore7();
|
||||||
|
PathSet queryValidPathsOld();
|
||||||
|
ValidPathInfo queryPathInfoOld(const Path & path);
|
||||||
|
|
||||||
|
struct GCState;
|
||||||
|
|
||||||
|
void deleteGarbage(GCState & state, const Path & path);
|
||||||
|
|
||||||
|
void tryToDelete(GCState & state, const Path & path);
|
||||||
|
|
||||||
|
bool canReachRoot(GCState & state, PathSet & visited, const Path & path);
|
||||||
|
|
||||||
|
void deletePathRecursive(GCState & state, const Path & path);
|
||||||
|
|
||||||
|
bool isActiveTempFile(const GCState & state,
|
||||||
|
const Path & path, const string & suffix);
|
||||||
|
|
||||||
|
int openGCLock(LockType lockType);
|
||||||
|
|
||||||
|
void removeUnusedLinks(const GCState & state);
|
||||||
|
|
||||||
|
void startSubstituter(const Path & substituter,
|
||||||
|
RunningSubstituter & runningSubstituter);
|
||||||
|
|
||||||
|
string getLineFromSubstituter(RunningSubstituter & run);
|
||||||
|
|
||||||
|
template<class T> T getIntLineFromSubstituter(RunningSubstituter & run);
|
||||||
|
|
||||||
|
Path createTempDirInStore();
|
||||||
|
|
||||||
|
Path importPath(bool requireSignature, Source & source);
|
||||||
|
|
||||||
|
void checkDerivationOutputs(const Path & drvPath, const Derivation & drv);
|
||||||
|
|
||||||
|
void optimisePath_(OptimiseStats & stats, const Path & path);
|
||||||
|
|
||||||
|
// Internal versions that are not wrapped in retry_sqlite.
|
||||||
|
bool isValidPath_(const Path & path);
|
||||||
|
void queryReferrers_(const Path & path, PathSet & referrers);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
typedef std::pair<dev_t, ino_t> Inode;
|
||||||
|
typedef set<Inode> InodesSeen;
|
||||||
|
|
||||||
|
|
||||||
|
/* "Fix", or canonicalise, the meta-data of the files in a store path
|
||||||
|
after it has been built. In particular:
|
||||||
|
- the last modification date on each file is set to 1 (i.e.,
|
||||||
|
00:00:01 1/1/1970 UTC)
|
||||||
|
- the permissions are set of 444 or 555 (i.e., read-only with or
|
||||||
|
without execute permission; setuid bits etc. are cleared)
|
||||||
|
- the owner and group are set to the Nix user and group, if we're
|
||||||
|
running as root. */
|
||||||
|
void canonicalisePathMetaData(const Path & path, uid_t fromUid, InodesSeen & inodesSeen);
|
||||||
|
void canonicalisePathMetaData(const Path & path, uid_t fromUid);
|
||||||
|
|
||||||
|
void canonicaliseTimestampAndPermissions(const Path & path);
|
||||||
|
|
||||||
|
MakeError(PathInUse, Error);
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,220 @@
|
||||||
|
#include "misc.hh"
|
||||||
|
#include "store-api.hh"
|
||||||
|
#include "local-store.hh"
|
||||||
|
#include "globals.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
Derivation derivationFromPath(StoreAPI & store, const Path & drvPath)
|
||||||
|
{
|
||||||
|
assertStorePath(drvPath);
|
||||||
|
store.ensurePath(drvPath);
|
||||||
|
return parseDerivation(readFile(drvPath));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void computeFSClosure(StoreAPI & store, const Path & path,
|
||||||
|
PathSet & paths, bool flipDirection, bool includeOutputs, bool includeDerivers)
|
||||||
|
{
|
||||||
|
if (paths.find(path) != paths.end()) return;
|
||||||
|
paths.insert(path);
|
||||||
|
|
||||||
|
PathSet edges;
|
||||||
|
|
||||||
|
if (flipDirection) {
|
||||||
|
store.queryReferrers(path, edges);
|
||||||
|
|
||||||
|
if (includeOutputs) {
|
||||||
|
PathSet derivers = store.queryValidDerivers(path);
|
||||||
|
foreach (PathSet::iterator, i, derivers)
|
||||||
|
edges.insert(*i);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (includeDerivers && isDerivation(path)) {
|
||||||
|
PathSet outputs = store.queryDerivationOutputs(path);
|
||||||
|
foreach (PathSet::iterator, i, outputs)
|
||||||
|
if (store.isValidPath(*i) && store.queryDeriver(*i) == path)
|
||||||
|
edges.insert(*i);
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
store.queryReferences(path, edges);
|
||||||
|
|
||||||
|
if (includeOutputs && isDerivation(path)) {
|
||||||
|
PathSet outputs = store.queryDerivationOutputs(path);
|
||||||
|
foreach (PathSet::iterator, i, outputs)
|
||||||
|
if (store.isValidPath(*i)) edges.insert(*i);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (includeDerivers) {
|
||||||
|
Path deriver = store.queryDeriver(path);
|
||||||
|
if (store.isValidPath(deriver)) edges.insert(deriver);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, edges)
|
||||||
|
computeFSClosure(store, *i, paths, flipDirection, includeOutputs, includeDerivers);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path findOutput(const Derivation & drv, string id)
|
||||||
|
{
|
||||||
|
foreach (DerivationOutputs::const_iterator, i, drv.outputs)
|
||||||
|
if (i->first == id) return i->second.path;
|
||||||
|
throw Error(format("derivation has no output `%1%'") % id);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void queryMissing(StoreAPI & store, const PathSet & targets,
|
||||||
|
PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
|
||||||
|
unsigned long long & downloadSize, unsigned long long & narSize)
|
||||||
|
{
|
||||||
|
downloadSize = narSize = 0;
|
||||||
|
|
||||||
|
PathSet todo(targets.begin(), targets.end()), done;
|
||||||
|
|
||||||
|
/* Getting substitute info has high latency when using the binary
|
||||||
|
cache substituter. Thus it's essential to do substitute
|
||||||
|
queries in parallel as much as possible. To accomplish this
|
||||||
|
we do the following:
|
||||||
|
|
||||||
|
- For all paths still to be processed (‘todo’), we add all
|
||||||
|
paths for which we need info to the set ‘query’. For an
|
||||||
|
unbuilt derivation this is the output paths; otherwise, it's
|
||||||
|
the path itself.
|
||||||
|
|
||||||
|
- We get info about all paths in ‘query’ in parallel.
|
||||||
|
|
||||||
|
- We process the results and add new items to ‘todo’ if
|
||||||
|
necessary. E.g. if a path is substitutable, then we need to
|
||||||
|
get info on its references.
|
||||||
|
|
||||||
|
- Repeat until ‘todo’ is empty.
|
||||||
|
*/
|
||||||
|
|
||||||
|
while (!todo.empty()) {
|
||||||
|
|
||||||
|
PathSet query, todoDrv, todoNonDrv;
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, todo) {
|
||||||
|
if (done.find(*i) != done.end()) continue;
|
||||||
|
done.insert(*i);
|
||||||
|
|
||||||
|
DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i);
|
||||||
|
|
||||||
|
if (isDerivation(i2.first)) {
|
||||||
|
if (!store.isValidPath(i2.first)) {
|
||||||
|
// FIXME: we could try to substitute p.
|
||||||
|
unknown.insert(*i);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
Derivation drv = derivationFromPath(store, i2.first);
|
||||||
|
|
||||||
|
PathSet invalid;
|
||||||
|
foreach (DerivationOutputs::iterator, j, drv.outputs)
|
||||||
|
if (wantOutput(j->first, i2.second)
|
||||||
|
&& !store.isValidPath(j->second.path))
|
||||||
|
invalid.insert(j->second.path);
|
||||||
|
if (invalid.empty()) continue;
|
||||||
|
|
||||||
|
todoDrv.insert(*i);
|
||||||
|
if (settings.useSubstitutes && !willBuildLocally(drv))
|
||||||
|
query.insert(invalid.begin(), invalid.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
else {
|
||||||
|
if (store.isValidPath(*i)) continue;
|
||||||
|
query.insert(*i);
|
||||||
|
todoNonDrv.insert(*i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
todo.clear();
|
||||||
|
|
||||||
|
SubstitutablePathInfos infos;
|
||||||
|
store.querySubstitutablePathInfos(query, infos);
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, todoDrv) {
|
||||||
|
DrvPathWithOutputs i2 = parseDrvPathWithOutputs(*i);
|
||||||
|
|
||||||
|
// FIXME: cache this
|
||||||
|
Derivation drv = derivationFromPath(store, i2.first);
|
||||||
|
|
||||||
|
PathSet outputs;
|
||||||
|
bool mustBuild = false;
|
||||||
|
if (settings.useSubstitutes && !willBuildLocally(drv)) {
|
||||||
|
foreach (DerivationOutputs::iterator, j, drv.outputs) {
|
||||||
|
if (!wantOutput(j->first, i2.second)) continue;
|
||||||
|
if (!store.isValidPath(j->second.path)) {
|
||||||
|
if (infos.find(j->second.path) == infos.end())
|
||||||
|
mustBuild = true;
|
||||||
|
else
|
||||||
|
outputs.insert(j->second.path);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
mustBuild = true;
|
||||||
|
|
||||||
|
if (mustBuild) {
|
||||||
|
willBuild.insert(i2.first);
|
||||||
|
todo.insert(drv.inputSrcs.begin(), drv.inputSrcs.end());
|
||||||
|
foreach (DerivationInputs::iterator, j, drv.inputDrvs)
|
||||||
|
todo.insert(makeDrvPathWithOutputs(j->first, j->second));
|
||||||
|
} else
|
||||||
|
todoNonDrv.insert(outputs.begin(), outputs.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, todoNonDrv) {
|
||||||
|
done.insert(*i);
|
||||||
|
SubstitutablePathInfos::iterator info = infos.find(*i);
|
||||||
|
if (info != infos.end()) {
|
||||||
|
willSubstitute.insert(*i);
|
||||||
|
downloadSize += info->second.downloadSize;
|
||||||
|
narSize += info->second.narSize;
|
||||||
|
todo.insert(info->second.references.begin(), info->second.references.end());
|
||||||
|
} else
|
||||||
|
unknown.insert(*i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void dfsVisit(StoreAPI & store, const PathSet & paths,
|
||||||
|
const Path & path, PathSet & visited, Paths & sorted,
|
||||||
|
PathSet & parents)
|
||||||
|
{
|
||||||
|
if (parents.find(path) != parents.end())
|
||||||
|
throw BuildError(format("cycle detected in the references of `%1%'") % path);
|
||||||
|
|
||||||
|
if (visited.find(path) != visited.end()) return;
|
||||||
|
visited.insert(path);
|
||||||
|
parents.insert(path);
|
||||||
|
|
||||||
|
PathSet references;
|
||||||
|
if (store.isValidPath(path))
|
||||||
|
store.queryReferences(path, references);
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, references)
|
||||||
|
/* Don't traverse into paths that don't exist. That can
|
||||||
|
happen due to substitutes for non-existent paths. */
|
||||||
|
if (*i != path && paths.find(*i) != paths.end())
|
||||||
|
dfsVisit(store, paths, *i, visited, sorted, parents);
|
||||||
|
|
||||||
|
sorted.push_front(path);
|
||||||
|
parents.erase(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Paths topoSortPaths(StoreAPI & store, const PathSet & paths)
|
||||||
|
{
|
||||||
|
Paths sorted;
|
||||||
|
PathSet visited, parents;
|
||||||
|
foreach (PathSet::const_iterator, i, paths)
|
||||||
|
dfsVisit(store, paths, *i, visited, sorted, parents);
|
||||||
|
return sorted;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,38 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "derivations.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
/* Read a derivation, after ensuring its existence through
|
||||||
|
ensurePath(). */
|
||||||
|
Derivation derivationFromPath(StoreAPI & store, const Path & drvPath);
|
||||||
|
|
||||||
|
/* Place in `paths' the set of all store paths in the file system
|
||||||
|
closure of `storePath'; that is, all paths than can be directly or
|
||||||
|
indirectly reached from it. `paths' is not cleared. If
|
||||||
|
`flipDirection' is true, the set of paths that can reach
|
||||||
|
`storePath' is returned; that is, the closures under the
|
||||||
|
`referrers' relation instead of the `references' relation is
|
||||||
|
returned. */
|
||||||
|
void computeFSClosure(StoreAPI & store, const Path & path,
|
||||||
|
PathSet & paths, bool flipDirection = false,
|
||||||
|
bool includeOutputs = false, bool includeDerivers = false);
|
||||||
|
|
||||||
|
/* Return the path corresponding to the output identifier `id' in the
|
||||||
|
given derivation. */
|
||||||
|
Path findOutput(const Derivation & drv, string id);
|
||||||
|
|
||||||
|
/* Given a set of paths that are to be built, return the set of
|
||||||
|
derivations that will be built, and the set of output paths that
|
||||||
|
will be substituted. */
|
||||||
|
void queryMissing(StoreAPI & store, const PathSet & targets,
|
||||||
|
PathSet & willBuild, PathSet & willSubstitute, PathSet & unknown,
|
||||||
|
unsigned long long & downloadSize, unsigned long long & narSize);
|
||||||
|
|
||||||
|
bool willBuildLocally(const Derivation & drv);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,180 @@
|
||||||
|
#include "config.h"
|
||||||
|
|
||||||
|
#include "util.hh"
|
||||||
|
#include "local-store.hh"
|
||||||
|
#include "globals.hh"
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <errno.h>
|
||||||
|
#include <stdio.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
static void makeWritable(const Path & path)
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (lstat(path.c_str(), &st))
|
||||||
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||||
|
if (chmod(path.c_str(), st.st_mode | S_IWUSR) == -1)
|
||||||
|
throw SysError(format("changing writability of `%1%'") % path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct MakeReadOnly
|
||||||
|
{
|
||||||
|
Path path;
|
||||||
|
MakeReadOnly(const Path & path) : path(path) { }
|
||||||
|
~MakeReadOnly()
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
/* This will make the path read-only. */
|
||||||
|
if (path != "") canonicaliseTimestampAndPermissions(path);
|
||||||
|
} catch (...) {
|
||||||
|
ignoreException();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::optimisePath_(OptimiseStats & stats, const Path & path)
|
||||||
|
{
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
struct stat st;
|
||||||
|
if (lstat(path.c_str(), &st))
|
||||||
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||||
|
|
||||||
|
if (S_ISDIR(st.st_mode)) {
|
||||||
|
Strings names = readDirectory(path);
|
||||||
|
foreach (Strings::iterator, i, names)
|
||||||
|
optimisePath_(stats, path + "/" + *i);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* We can hard link regular files and maybe symlinks. */
|
||||||
|
if (!S_ISREG(st.st_mode)
|
||||||
|
#if CAN_LINK_SYMLINK
|
||||||
|
&& !S_ISLNK(st.st_mode)
|
||||||
|
#endif
|
||||||
|
) return;
|
||||||
|
|
||||||
|
/* Sometimes SNAFUs can cause files in the Nix store to be
|
||||||
|
modified, in particular when running programs as root under
|
||||||
|
NixOS (example: $fontconfig/var/cache being modified). Skip
|
||||||
|
those files. FIXME: check the modification time. */
|
||||||
|
if (S_ISREG(st.st_mode) && (st.st_mode & S_IWUSR)) {
|
||||||
|
printMsg(lvlError, format("skipping suspicious writable file `%1%'") % path);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Hash the file. Note that hashPath() returns the hash over the
|
||||||
|
NAR serialisation, which includes the execute bit on the file.
|
||||||
|
Thus, executable and non-executable files with the same
|
||||||
|
contents *won't* be linked (which is good because otherwise the
|
||||||
|
permissions would be screwed up).
|
||||||
|
|
||||||
|
Also note that if `path' is a symlink, then we're hashing the
|
||||||
|
contents of the symlink (i.e. the result of readlink()), not
|
||||||
|
the contents of the target (which may not even exist). */
|
||||||
|
Hash hash = hashPath(htSHA256, path).first;
|
||||||
|
stats.totalFiles++;
|
||||||
|
printMsg(lvlDebug, format("`%1%' has hash `%2%'") % path % printHash(hash));
|
||||||
|
|
||||||
|
/* Check if this is a known hash. */
|
||||||
|
Path linkPath = linksDir + "/" + printHash32(hash);
|
||||||
|
|
||||||
|
if (!pathExists(linkPath)) {
|
||||||
|
/* Nope, create a hard link in the links directory. */
|
||||||
|
if (link(path.c_str(), linkPath.c_str()) == 0) return;
|
||||||
|
if (errno != EEXIST)
|
||||||
|
throw SysError(format("cannot link `%1%' to `%2%'") % linkPath % path);
|
||||||
|
/* Fall through if another process created ‘linkPath’ before
|
||||||
|
we did. */
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Yes! We've seen a file with the same contents. Replace the
|
||||||
|
current file with a hard link to that file. */
|
||||||
|
struct stat stLink;
|
||||||
|
if (lstat(linkPath.c_str(), &stLink))
|
||||||
|
throw SysError(format("getting attributes of path `%1%'") % linkPath);
|
||||||
|
|
||||||
|
stats.sameContents++;
|
||||||
|
if (st.st_ino == stLink.st_ino) {
|
||||||
|
printMsg(lvlDebug, format("`%1%' is already linked to `%2%'") % path % linkPath);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
printMsg(lvlTalkative, format("linking `%1%' to `%2%'") % path % linkPath);
|
||||||
|
|
||||||
|
/* Make the containing directory writable, but only if it's not
|
||||||
|
the store itself (we don't want or need to mess with its
|
||||||
|
permissions). */
|
||||||
|
bool mustToggle = !isStorePath(path);
|
||||||
|
if (mustToggle) makeWritable(dirOf(path));
|
||||||
|
|
||||||
|
/* When we're done, make the directory read-only again and reset
|
||||||
|
its timestamp back to 0. */
|
||||||
|
MakeReadOnly makeReadOnly(mustToggle ? dirOf(path) : "");
|
||||||
|
|
||||||
|
Path tempLink = (format("%1%/.tmp-link-%2%-%3%")
|
||||||
|
% settings.nixStore % getpid() % rand()).str();
|
||||||
|
|
||||||
|
if (link(linkPath.c_str(), tempLink.c_str()) == -1) {
|
||||||
|
if (errno == EMLINK) {
|
||||||
|
/* Too many links to the same file (>= 32000 on most file
|
||||||
|
systems). This is likely to happen with empty files.
|
||||||
|
Just shrug and ignore. */
|
||||||
|
if (st.st_size)
|
||||||
|
printMsg(lvlInfo, format("`%1%' has maximum number of links") % linkPath);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw SysError(format("cannot link `%1%' to `%2%'") % tempLink % linkPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Atomically replace the old file with the new hard link. */
|
||||||
|
if (rename(tempLink.c_str(), path.c_str()) == -1) {
|
||||||
|
if (unlink(tempLink.c_str()) == -1)
|
||||||
|
printMsg(lvlError, format("unable to unlink `%1%'") % tempLink);
|
||||||
|
if (errno == EMLINK) {
|
||||||
|
/* Some filesystems generate too many links on the rename,
|
||||||
|
rather than on the original link. (Probably it
|
||||||
|
temporarily increases the st_nlink field before
|
||||||
|
decreasing it again.) */
|
||||||
|
if (st.st_size)
|
||||||
|
printMsg(lvlInfo, format("`%1%' has maximum number of links") % linkPath);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw SysError(format("cannot rename `%1%' to `%2%'") % tempLink % path);
|
||||||
|
}
|
||||||
|
|
||||||
|
stats.filesLinked++;
|
||||||
|
stats.bytesFreed += st.st_size;
|
||||||
|
stats.blocksFreed += st.st_blocks;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::optimiseStore(OptimiseStats & stats)
|
||||||
|
{
|
||||||
|
PathSet paths = queryAllValidPaths();
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, paths) {
|
||||||
|
addTempRoot(*i);
|
||||||
|
if (!isValidPath(*i)) continue; /* path was GC'ed, probably */
|
||||||
|
startNest(nest, lvlChatty, format("hashing files in `%1%'") % *i);
|
||||||
|
optimisePath_(stats, *i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void LocalStore::optimisePath(const Path & path)
|
||||||
|
{
|
||||||
|
OptimiseStats stats;
|
||||||
|
if (settings.autoOptimiseStore) optimisePath_(stats, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,199 @@
|
||||||
|
#include "pathlocks.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
|
||||||
|
#include <cerrno>
|
||||||
|
#include <cstdlib>
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
int openLockFile(const Path & path, bool create)
|
||||||
|
{
|
||||||
|
AutoCloseFD fd;
|
||||||
|
|
||||||
|
fd = open(path.c_str(), O_RDWR | (create ? O_CREAT : 0), 0600);
|
||||||
|
if (fd == -1 && (create || errno != ENOENT))
|
||||||
|
throw SysError(format("opening lock file `%1%'") % path);
|
||||||
|
|
||||||
|
closeOnExec(fd);
|
||||||
|
|
||||||
|
return fd.borrow();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void deleteLockFile(const Path & path, int fd)
|
||||||
|
{
|
||||||
|
/* Get rid of the lock file. Have to be careful not to introduce
|
||||||
|
races. Write a (meaningless) token to the file to indicate to
|
||||||
|
other processes waiting on this lock that the lock is stale
|
||||||
|
(deleted). */
|
||||||
|
unlink(path.c_str());
|
||||||
|
writeFull(fd, (const unsigned char *) "d", 1);
|
||||||
|
/* Note that the result of unlink() is ignored; removing the lock
|
||||||
|
file is an optimisation, not a necessity. */
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool lockFile(int fd, LockType lockType, bool wait)
|
||||||
|
{
|
||||||
|
struct flock lock;
|
||||||
|
if (lockType == ltRead) lock.l_type = F_RDLCK;
|
||||||
|
else if (lockType == ltWrite) lock.l_type = F_WRLCK;
|
||||||
|
else if (lockType == ltNone) lock.l_type = F_UNLCK;
|
||||||
|
else abort();
|
||||||
|
lock.l_whence = SEEK_SET;
|
||||||
|
lock.l_start = 0;
|
||||||
|
lock.l_len = 0; /* entire file */
|
||||||
|
|
||||||
|
if (wait) {
|
||||||
|
while (fcntl(fd, F_SETLKW, &lock) != 0) {
|
||||||
|
checkInterrupt();
|
||||||
|
if (errno != EINTR)
|
||||||
|
throw SysError(format("acquiring/releasing lock"));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
while (fcntl(fd, F_SETLK, &lock) != 0) {
|
||||||
|
checkInterrupt();
|
||||||
|
if (errno == EACCES || errno == EAGAIN) return false;
|
||||||
|
if (errno != EINTR)
|
||||||
|
throw SysError(format("acquiring/releasing lock"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* This enables us to check whether are not already holding a lock on
|
||||||
|
a file ourselves. POSIX locks (fcntl) suck in this respect: if we
|
||||||
|
close a descriptor, the previous lock will be closed as well. And
|
||||||
|
there is no way to query whether we already have a lock (F_GETLK
|
||||||
|
only works on locks held by other processes). */
|
||||||
|
static StringSet lockedPaths; /* !!! not thread-safe */
|
||||||
|
|
||||||
|
|
||||||
|
PathLocks::PathLocks()
|
||||||
|
: deletePaths(false)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathLocks::PathLocks(const PathSet & paths, const string & waitMsg)
|
||||||
|
: deletePaths(false)
|
||||||
|
{
|
||||||
|
lockPaths(paths, waitMsg);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool PathLocks::lockPaths(const PathSet & _paths,
|
||||||
|
const string & waitMsg, bool wait)
|
||||||
|
{
|
||||||
|
assert(fds.empty());
|
||||||
|
|
||||||
|
/* Note that `fds' is built incrementally so that the destructor
|
||||||
|
will only release those locks that we have already acquired. */
|
||||||
|
|
||||||
|
/* Sort the paths. This assures that locks are always acquired in
|
||||||
|
the same order, thus preventing deadlocks. */
|
||||||
|
Paths paths(_paths.begin(), _paths.end());
|
||||||
|
paths.sort();
|
||||||
|
|
||||||
|
/* Acquire the lock for each path. */
|
||||||
|
foreach (Paths::iterator, i, paths) {
|
||||||
|
checkInterrupt();
|
||||||
|
Path path = *i;
|
||||||
|
Path lockPath = path + ".lock";
|
||||||
|
|
||||||
|
debug(format("locking path `%1%'") % path);
|
||||||
|
|
||||||
|
if (lockedPaths.find(lockPath) != lockedPaths.end())
|
||||||
|
throw Error("deadlock: trying to re-acquire self-held lock");
|
||||||
|
|
||||||
|
AutoCloseFD fd;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
|
||||||
|
/* Open/create the lock file. */
|
||||||
|
fd = openLockFile(lockPath, true);
|
||||||
|
|
||||||
|
/* Acquire an exclusive lock. */
|
||||||
|
if (!lockFile(fd, ltWrite, false)) {
|
||||||
|
if (wait) {
|
||||||
|
if (waitMsg != "") printMsg(lvlError, waitMsg);
|
||||||
|
lockFile(fd, ltWrite, true);
|
||||||
|
} else {
|
||||||
|
/* Failed to lock this path; release all other
|
||||||
|
locks. */
|
||||||
|
unlock();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
debug(format("lock acquired on `%1%'") % lockPath);
|
||||||
|
|
||||||
|
/* Check that the lock file hasn't become stale (i.e.,
|
||||||
|
hasn't been unlinked). */
|
||||||
|
struct stat st;
|
||||||
|
if (fstat(fd, &st) == -1)
|
||||||
|
throw SysError(format("statting lock file `%1%'") % lockPath);
|
||||||
|
if (st.st_size != 0)
|
||||||
|
/* This lock file has been unlinked, so we're holding
|
||||||
|
a lock on a deleted file. This means that other
|
||||||
|
processes may create and acquire a lock on
|
||||||
|
`lockPath', and proceed. So we must retry. */
|
||||||
|
debug(format("open lock file `%1%' has become stale") % lockPath);
|
||||||
|
else
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Use borrow so that the descriptor isn't closed. */
|
||||||
|
fds.push_back(FDPair(fd.borrow(), lockPath));
|
||||||
|
lockedPaths.insert(lockPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathLocks::~PathLocks()
|
||||||
|
{
|
||||||
|
unlock();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void PathLocks::unlock()
|
||||||
|
{
|
||||||
|
foreach (list<FDPair>::iterator, i, fds) {
|
||||||
|
if (deletePaths) deleteLockFile(i->second, i->first);
|
||||||
|
|
||||||
|
lockedPaths.erase(i->second);
|
||||||
|
if (close(i->first) == -1)
|
||||||
|
printMsg(lvlError,
|
||||||
|
format("error (ignored): cannot close lock file on `%1%'") % i->second);
|
||||||
|
|
||||||
|
debug(format("lock released on `%1%'") % i->second);
|
||||||
|
}
|
||||||
|
|
||||||
|
fds.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void PathLocks::setDeletion(bool deletePaths)
|
||||||
|
{
|
||||||
|
this->deletePaths = deletePaths;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool pathIsLockedByMe(const Path & path)
|
||||||
|
{
|
||||||
|
Path lockPath = path + ".lock";
|
||||||
|
return lockedPaths.find(lockPath) != lockedPaths.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,45 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "types.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
/* Open (possibly create) a lock file and return the file descriptor.
|
||||||
|
-1 is returned if create is false and the lock could not be opened
|
||||||
|
because it doesn't exist. Any other error throws an exception. */
|
||||||
|
int openLockFile(const Path & path, bool create);
|
||||||
|
|
||||||
|
/* Delete an open lock file. */
|
||||||
|
void deleteLockFile(const Path & path, int fd);
|
||||||
|
|
||||||
|
enum LockType { ltRead, ltWrite, ltNone };
|
||||||
|
|
||||||
|
bool lockFile(int fd, LockType lockType, bool wait);
|
||||||
|
|
||||||
|
|
||||||
|
class PathLocks
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
typedef std::pair<int, Path> FDPair;
|
||||||
|
list<FDPair> fds;
|
||||||
|
bool deletePaths;
|
||||||
|
|
||||||
|
public:
|
||||||
|
PathLocks();
|
||||||
|
PathLocks(const PathSet & paths,
|
||||||
|
const string & waitMsg = "");
|
||||||
|
bool lockPaths(const PathSet & _paths,
|
||||||
|
const string & waitMsg = "",
|
||||||
|
bool wait = true);
|
||||||
|
~PathLocks();
|
||||||
|
void unlock();
|
||||||
|
void setDeletion(bool deletePaths);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
bool pathIsLockedByMe(const Path & path);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,122 @@
|
||||||
|
#include "references.hh"
|
||||||
|
#include "hash.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
#include "archive.hh"
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <cstdlib>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
static unsigned int refLength = 32; /* characters */
|
||||||
|
|
||||||
|
|
||||||
|
static void search(const unsigned char * s, unsigned int len,
|
||||||
|
StringSet & hashes, StringSet & seen)
|
||||||
|
{
|
||||||
|
static bool initialised = false;
|
||||||
|
static bool isBase32[256];
|
||||||
|
if (!initialised) {
|
||||||
|
for (unsigned int i = 0; i < 256; ++i) isBase32[i] = false;
|
||||||
|
for (unsigned int i = 0; i < base32Chars.size(); ++i)
|
||||||
|
isBase32[(unsigned char) base32Chars[i]] = true;
|
||||||
|
initialised = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
for (unsigned int i = 0; i + refLength <= len; ) {
|
||||||
|
int j;
|
||||||
|
bool match = true;
|
||||||
|
for (j = refLength - 1; j >= 0; --j)
|
||||||
|
if (!isBase32[(unsigned char) s[i + j]]) {
|
||||||
|
i += j + 1;
|
||||||
|
match = false;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (!match) continue;
|
||||||
|
string ref((const char *) s + i, refLength);
|
||||||
|
if (hashes.find(ref) != hashes.end()) {
|
||||||
|
debug(format("found reference to `%1%' at offset `%2%'")
|
||||||
|
% ref % i);
|
||||||
|
seen.insert(ref);
|
||||||
|
hashes.erase(ref);
|
||||||
|
}
|
||||||
|
++i;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct RefScanSink : Sink
|
||||||
|
{
|
||||||
|
HashSink hashSink;
|
||||||
|
StringSet hashes;
|
||||||
|
StringSet seen;
|
||||||
|
|
||||||
|
string tail;
|
||||||
|
|
||||||
|
RefScanSink() : hashSink(htSHA256) { }
|
||||||
|
|
||||||
|
void operator () (const unsigned char * data, size_t len);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void RefScanSink::operator () (const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
hashSink(data, len);
|
||||||
|
|
||||||
|
/* It's possible that a reference spans the previous and current
|
||||||
|
fragment, so search in the concatenation of the tail of the
|
||||||
|
previous fragment and the start of the current fragment. */
|
||||||
|
string s = tail + string((const char *) data, len > refLength ? refLength : len);
|
||||||
|
search((const unsigned char *) s.data(), s.size(), hashes, seen);
|
||||||
|
|
||||||
|
search(data, len, hashes, seen);
|
||||||
|
|
||||||
|
unsigned int tailLen = len <= refLength ? len : refLength;
|
||||||
|
tail =
|
||||||
|
string(tail, tail.size() < refLength - tailLen ? 0 : tail.size() - (refLength - tailLen)) +
|
||||||
|
string((const char *) data + len - tailLen, tailLen);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet scanForReferences(const string & path,
|
||||||
|
const PathSet & refs, HashResult & hash)
|
||||||
|
{
|
||||||
|
RefScanSink sink;
|
||||||
|
std::map<string, Path> backMap;
|
||||||
|
|
||||||
|
/* For efficiency (and a higher hit rate), just search for the
|
||||||
|
hash part of the file name. (This assumes that all references
|
||||||
|
have the form `HASH-bla'). */
|
||||||
|
foreach (PathSet::const_iterator, i, refs) {
|
||||||
|
string baseName = baseNameOf(*i);
|
||||||
|
string::size_type pos = baseName.find('-');
|
||||||
|
if (pos == string::npos)
|
||||||
|
throw Error(format("bad reference `%1%'") % *i);
|
||||||
|
string s = string(baseName, 0, pos);
|
||||||
|
assert(s.size() == refLength);
|
||||||
|
assert(backMap.find(s) == backMap.end());
|
||||||
|
// parseHash(htSHA256, s);
|
||||||
|
sink.hashes.insert(s);
|
||||||
|
backMap[s] = *i;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Look for the hashes in the NAR dump of the path. */
|
||||||
|
dumpPath(path, sink);
|
||||||
|
|
||||||
|
/* Map the hashes found back to their store paths. */
|
||||||
|
PathSet found;
|
||||||
|
foreach (StringSet::iterator, i, sink.seen) {
|
||||||
|
std::map<string, Path>::iterator j;
|
||||||
|
if ((j = backMap.find(*i)) == backMap.end()) abort();
|
||||||
|
found.insert(j->second);
|
||||||
|
}
|
||||||
|
|
||||||
|
hash = sink.hashSink.finish();
|
||||||
|
|
||||||
|
return found;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,11 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "types.hh"
|
||||||
|
#include "hash.hh"
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
PathSet scanForReferences(const Path & path, const PathSet & refs,
|
||||||
|
HashResult & hash);
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,602 @@
|
||||||
|
#include "serialise.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
#include "remote-store.hh"
|
||||||
|
#include "worker-protocol.hh"
|
||||||
|
#include "archive.hh"
|
||||||
|
#include "affinity.hh"
|
||||||
|
#include "globals.hh"
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
#include <sys/un.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
Path readStorePath(Source & from)
|
||||||
|
{
|
||||||
|
Path path = readString(from);
|
||||||
|
assertStorePath(path);
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template<class T> T readStorePaths(Source & from)
|
||||||
|
{
|
||||||
|
T paths = readStrings<T>(from);
|
||||||
|
foreach (typename T::iterator, i, paths) assertStorePath(*i);
|
||||||
|
return paths;
|
||||||
|
}
|
||||||
|
|
||||||
|
template PathSet readStorePaths(Source & from);
|
||||||
|
|
||||||
|
|
||||||
|
RemoteStore::RemoteStore()
|
||||||
|
{
|
||||||
|
initialised = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::openConnection(bool reserveSpace)
|
||||||
|
{
|
||||||
|
if (initialised) return;
|
||||||
|
initialised = true;
|
||||||
|
|
||||||
|
string remoteMode = getEnv("NIX_REMOTE");
|
||||||
|
|
||||||
|
if (remoteMode == "daemon")
|
||||||
|
/* Connect to a daemon that does the privileged work for
|
||||||
|
us. */
|
||||||
|
connectToDaemon();
|
||||||
|
else
|
||||||
|
throw Error(format("invalid setting for NIX_REMOTE, `%1%'") % remoteMode);
|
||||||
|
|
||||||
|
from.fd = fdSocket;
|
||||||
|
to.fd = fdSocket;
|
||||||
|
|
||||||
|
/* Send the magic greeting, check for the reply. */
|
||||||
|
try {
|
||||||
|
writeInt(WORKER_MAGIC_1, to);
|
||||||
|
to.flush();
|
||||||
|
unsigned int magic = readInt(from);
|
||||||
|
if (magic != WORKER_MAGIC_2) throw Error("protocol mismatch");
|
||||||
|
|
||||||
|
daemonVersion = readInt(from);
|
||||||
|
if (GET_PROTOCOL_MAJOR(daemonVersion) != GET_PROTOCOL_MAJOR(PROTOCOL_VERSION))
|
||||||
|
throw Error("Nix daemon protocol version not supported");
|
||||||
|
writeInt(PROTOCOL_VERSION, to);
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 14) {
|
||||||
|
int cpu = settings.lockCPU ? lockToCurrentCPU() : -1;
|
||||||
|
if (cpu != -1) {
|
||||||
|
writeInt(1, to);
|
||||||
|
writeInt(cpu, to);
|
||||||
|
} else
|
||||||
|
writeInt(0, to);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 11)
|
||||||
|
writeInt(reserveSpace, to);
|
||||||
|
|
||||||
|
processStderr();
|
||||||
|
}
|
||||||
|
catch (Error & e) {
|
||||||
|
throw Error(format("cannot start worker (%1%)")
|
||||||
|
% e.msg());
|
||||||
|
}
|
||||||
|
|
||||||
|
setOptions();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::connectToDaemon()
|
||||||
|
{
|
||||||
|
fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
|
||||||
|
if (fdSocket == -1)
|
||||||
|
throw SysError("cannot create Unix domain socket");
|
||||||
|
closeOnExec(fdSocket);
|
||||||
|
|
||||||
|
string socketPath = settings.nixDaemonSocketFile;
|
||||||
|
|
||||||
|
/* Urgh, sockaddr_un allows path names of only 108 characters. So
|
||||||
|
chdir to the socket directory so that we can pass a relative
|
||||||
|
path name. !!! this is probably a bad idea in multi-threaded
|
||||||
|
applications... */
|
||||||
|
AutoCloseFD fdPrevDir = open(".", O_RDONLY);
|
||||||
|
if (fdPrevDir == -1) throw SysError("couldn't open current directory");
|
||||||
|
chdir(dirOf(socketPath).c_str());
|
||||||
|
Path socketPathRel = "./" + baseNameOf(socketPath);
|
||||||
|
|
||||||
|
struct sockaddr_un addr;
|
||||||
|
addr.sun_family = AF_UNIX;
|
||||||
|
if (socketPathRel.size() >= sizeof(addr.sun_path))
|
||||||
|
throw Error(format("socket path `%1%' is too long") % socketPathRel);
|
||||||
|
using namespace std;
|
||||||
|
strcpy(addr.sun_path, socketPathRel.c_str());
|
||||||
|
|
||||||
|
if (connect(fdSocket, (struct sockaddr *) &addr, sizeof(addr)) == -1)
|
||||||
|
throw SysError(format("cannot connect to daemon at `%1%'") % socketPath);
|
||||||
|
|
||||||
|
if (fchdir(fdPrevDir) == -1)
|
||||||
|
throw SysError("couldn't change back to previous directory");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
RemoteStore::~RemoteStore()
|
||||||
|
{
|
||||||
|
try {
|
||||||
|
to.flush();
|
||||||
|
fdSocket.close();
|
||||||
|
if (child != -1)
|
||||||
|
child.wait(true);
|
||||||
|
} catch (...) {
|
||||||
|
ignoreException();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::setOptions()
|
||||||
|
{
|
||||||
|
writeInt(wopSetOptions, to);
|
||||||
|
|
||||||
|
writeInt(settings.keepFailed, to);
|
||||||
|
writeInt(settings.keepGoing, to);
|
||||||
|
writeInt(settings.tryFallback, to);
|
||||||
|
writeInt(verbosity, to);
|
||||||
|
writeInt(settings.maxBuildJobs, to);
|
||||||
|
writeInt(settings.maxSilentTime, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 2)
|
||||||
|
writeInt(settings.useBuildHook, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 4) {
|
||||||
|
writeInt(settings.buildVerbosity, to);
|
||||||
|
writeInt(logType, to);
|
||||||
|
writeInt(settings.printBuildTrace, to);
|
||||||
|
}
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 6)
|
||||||
|
writeInt(settings.buildCores, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 10)
|
||||||
|
writeInt(settings.useSubstitutes, to);
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 12) {
|
||||||
|
Settings::SettingsMap overrides = settings.getOverrides();
|
||||||
|
writeInt(overrides.size(), to);
|
||||||
|
foreach (Settings::SettingsMap::iterator, i, overrides) {
|
||||||
|
writeString(i->first, to);
|
||||||
|
writeString(i->second, to);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
processStderr();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool RemoteStore::isValidPath(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopIsValidPath, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
unsigned int reply = readInt(from);
|
||||||
|
return reply != 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryValidPaths(const PathSet & paths)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||||
|
PathSet res;
|
||||||
|
foreach (PathSet::const_iterator, i, paths)
|
||||||
|
if (isValidPath(*i)) res.insert(*i);
|
||||||
|
return res;
|
||||||
|
} else {
|
||||||
|
writeInt(wopQueryValidPaths, to);
|
||||||
|
writeStrings(paths, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryAllValidPaths()
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryAllValidPaths, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::querySubstitutablePaths(const PathSet & paths)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||||
|
PathSet res;
|
||||||
|
foreach (PathSet::const_iterator, i, paths) {
|
||||||
|
writeInt(wopHasSubstitutes, to);
|
||||||
|
writeString(*i, to);
|
||||||
|
processStderr();
|
||||||
|
if (readInt(from)) res.insert(*i);
|
||||||
|
}
|
||||||
|
return res;
|
||||||
|
} else {
|
||||||
|
writeInt(wopQuerySubstitutablePaths, to);
|
||||||
|
writeStrings(paths, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::querySubstitutablePathInfos(const PathSet & paths,
|
||||||
|
SubstitutablePathInfos & infos)
|
||||||
|
{
|
||||||
|
if (paths.empty()) return;
|
||||||
|
|
||||||
|
openConnection();
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) < 3) return;
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) < 12) {
|
||||||
|
|
||||||
|
foreach (PathSet::const_iterator, i, paths) {
|
||||||
|
SubstitutablePathInfo info;
|
||||||
|
writeInt(wopQuerySubstitutablePathInfo, to);
|
||||||
|
writeString(*i, to);
|
||||||
|
processStderr();
|
||||||
|
unsigned int reply = readInt(from);
|
||||||
|
if (reply == 0) continue;
|
||||||
|
info.deriver = readString(from);
|
||||||
|
if (info.deriver != "") assertStorePath(info.deriver);
|
||||||
|
info.references = readStorePaths<PathSet>(from);
|
||||||
|
info.downloadSize = readLongLong(from);
|
||||||
|
info.narSize = GET_PROTOCOL_MINOR(daemonVersion) >= 7 ? readLongLong(from) : 0;
|
||||||
|
infos[*i] = info;
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
|
||||||
|
writeInt(wopQuerySubstitutablePathInfos, to);
|
||||||
|
writeStrings(paths, to);
|
||||||
|
processStderr();
|
||||||
|
unsigned int count = readInt(from);
|
||||||
|
for (unsigned int n = 0; n < count; n++) {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
SubstitutablePathInfo & info(infos[path]);
|
||||||
|
info.deriver = readString(from);
|
||||||
|
if (info.deriver != "") assertStorePath(info.deriver);
|
||||||
|
info.references = readStorePaths<PathSet>(from);
|
||||||
|
info.downloadSize = readLongLong(from);
|
||||||
|
info.narSize = readLongLong(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ValidPathInfo RemoteStore::queryPathInfo(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryPathInfo, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
ValidPathInfo info;
|
||||||
|
info.path = path;
|
||||||
|
info.deriver = readString(from);
|
||||||
|
if (info.deriver != "") assertStorePath(info.deriver);
|
||||||
|
info.hash = parseHash(htSHA256, readString(from));
|
||||||
|
info.references = readStorePaths<PathSet>(from);
|
||||||
|
info.registrationTime = readInt(from);
|
||||||
|
info.narSize = readLongLong(from);
|
||||||
|
return info;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash RemoteStore::queryPathHash(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryPathHash, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
string hash = readString(from);
|
||||||
|
return parseHash(htSHA256, hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::queryReferences(const Path & path,
|
||||||
|
PathSet & references)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryReferences, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
PathSet references2 = readStorePaths<PathSet>(from);
|
||||||
|
references.insert(references2.begin(), references2.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::queryReferrers(const Path & path,
|
||||||
|
PathSet & referrers)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryReferrers, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
PathSet referrers2 = readStorePaths<PathSet>(from);
|
||||||
|
referrers.insert(referrers2.begin(), referrers2.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path RemoteStore::queryDeriver(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryDeriver, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
Path drvPath = readString(from);
|
||||||
|
if (drvPath != "") assertStorePath(drvPath);
|
||||||
|
return drvPath;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryValidDerivers(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryValidDerivers, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryDerivationOutputs(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryDerivationOutputs, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryDerivationOutputNames(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryDerivationOutputNames, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
return readStrings<PathSet>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path RemoteStore::queryPathFromHashPart(const string & hashPart)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryPathFromHashPart, to);
|
||||||
|
writeString(hashPart, to);
|
||||||
|
processStderr();
|
||||||
|
Path path = readString(from);
|
||||||
|
if (!path.empty()) assertStorePath(path);
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path RemoteStore::addToStore(const Path & _srcPath,
|
||||||
|
bool recursive, HashType hashAlgo, PathFilter & filter, bool repair)
|
||||||
|
{
|
||||||
|
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
|
||||||
|
|
||||||
|
openConnection();
|
||||||
|
|
||||||
|
Path srcPath(absPath(_srcPath));
|
||||||
|
|
||||||
|
writeInt(wopAddToStore, to);
|
||||||
|
writeString(baseNameOf(srcPath), to);
|
||||||
|
/* backwards compatibility hack */
|
||||||
|
writeInt((hashAlgo == htSHA256 && recursive) ? 0 : 1, to);
|
||||||
|
writeInt(recursive ? 1 : 0, to);
|
||||||
|
writeString(printHashType(hashAlgo), to);
|
||||||
|
dumpPath(srcPath, to, filter);
|
||||||
|
processStderr();
|
||||||
|
return readStorePath(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path RemoteStore::addTextToStore(const string & name, const string & s,
|
||||||
|
const PathSet & references, bool repair)
|
||||||
|
{
|
||||||
|
if (repair) throw Error("repairing is not supported when building through the Nix daemon");
|
||||||
|
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopAddTextToStore, to);
|
||||||
|
writeString(name, to);
|
||||||
|
writeString(s, to);
|
||||||
|
writeStrings(references, to);
|
||||||
|
|
||||||
|
processStderr();
|
||||||
|
return readStorePath(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::exportPath(const Path & path, bool sign,
|
||||||
|
Sink & sink)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopExportPath, to);
|
||||||
|
writeString(path, to);
|
||||||
|
writeInt(sign ? 1 : 0, to);
|
||||||
|
processStderr(&sink); /* sink receives the actual data */
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Paths RemoteStore::importPaths(bool requireSignature, Source & source)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopImportPaths, to);
|
||||||
|
/* We ignore requireSignature, since the worker forces it to true
|
||||||
|
anyway. */
|
||||||
|
processStderr(0, &source);
|
||||||
|
return readStorePaths<Paths>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::buildPaths(const PathSet & drvPaths, BuildMode buildMode)
|
||||||
|
{
|
||||||
|
if (buildMode != bmNormal) throw Error("repairing or checking is not supported when building through the Nix daemon");
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopBuildPaths, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 13)
|
||||||
|
writeStrings(drvPaths, to);
|
||||||
|
else {
|
||||||
|
/* For backwards compatibility with old daemons, strip output
|
||||||
|
identifiers. */
|
||||||
|
PathSet drvPaths2;
|
||||||
|
foreach (PathSet::const_iterator, i, drvPaths)
|
||||||
|
drvPaths2.insert(string(*i, 0, i->find('!')));
|
||||||
|
writeStrings(drvPaths2, to);
|
||||||
|
}
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::ensurePath(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopEnsurePath, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::addTempRoot(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopAddTempRoot, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::addIndirectRoot(const Path & path)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopAddIndirectRoot, to);
|
||||||
|
writeString(path, to);
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::syncWithGC()
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopSyncWithGC, to);
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Roots RemoteStore::findRoots()
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopFindRoots, to);
|
||||||
|
processStderr();
|
||||||
|
unsigned int count = readInt(from);
|
||||||
|
Roots result;
|
||||||
|
while (count--) {
|
||||||
|
Path link = readString(from);
|
||||||
|
Path target = readStorePath(from);
|
||||||
|
result[link] = target;
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::collectGarbage(const GCOptions & options, GCResults & results)
|
||||||
|
{
|
||||||
|
openConnection(false);
|
||||||
|
|
||||||
|
writeInt(wopCollectGarbage, to);
|
||||||
|
writeInt(options.action, to);
|
||||||
|
writeStrings(options.pathsToDelete, to);
|
||||||
|
writeInt(options.ignoreLiveness, to);
|
||||||
|
writeLongLong(options.maxFreed, to);
|
||||||
|
writeInt(0, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(daemonVersion) >= 5) {
|
||||||
|
/* removed options */
|
||||||
|
writeInt(0, to);
|
||||||
|
writeInt(0, to);
|
||||||
|
}
|
||||||
|
|
||||||
|
processStderr();
|
||||||
|
|
||||||
|
results.paths = readStrings<PathSet>(from);
|
||||||
|
results.bytesFreed = readLongLong(from);
|
||||||
|
readLongLong(from); // obsolete
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
PathSet RemoteStore::queryFailedPaths()
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopQueryFailedPaths, to);
|
||||||
|
processStderr();
|
||||||
|
return readStorePaths<PathSet>(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::clearFailedPaths(const PathSet & paths)
|
||||||
|
{
|
||||||
|
openConnection();
|
||||||
|
writeInt(wopClearFailedPaths, to);
|
||||||
|
writeStrings(paths, to);
|
||||||
|
processStderr();
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void RemoteStore::processStderr(Sink * sink, Source * source)
|
||||||
|
{
|
||||||
|
to.flush();
|
||||||
|
unsigned int msg;
|
||||||
|
while ((msg = readInt(from)) == STDERR_NEXT
|
||||||
|
|| msg == STDERR_READ || msg == STDERR_WRITE) {
|
||||||
|
if (msg == STDERR_WRITE) {
|
||||||
|
string s = readString(from);
|
||||||
|
if (!sink) throw Error("no sink");
|
||||||
|
(*sink)((const unsigned char *) s.data(), s.size());
|
||||||
|
}
|
||||||
|
else if (msg == STDERR_READ) {
|
||||||
|
if (!source) throw Error("no source");
|
||||||
|
size_t len = readInt(from);
|
||||||
|
unsigned char * buf = new unsigned char[len];
|
||||||
|
AutoDeleteArray<unsigned char> d(buf);
|
||||||
|
writeString(buf, source->read(buf, len), to);
|
||||||
|
to.flush();
|
||||||
|
}
|
||||||
|
else {
|
||||||
|
string s = readString(from);
|
||||||
|
writeToStderr(s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (msg == STDERR_ERROR) {
|
||||||
|
string error = readString(from);
|
||||||
|
unsigned int status = GET_PROTOCOL_MINOR(daemonVersion) >= 8 ? readInt(from) : 1;
|
||||||
|
throw Error(format("%1%") % error, status);
|
||||||
|
}
|
||||||
|
else if (msg != STDERR_LAST)
|
||||||
|
throw Error("protocol error processing standard error");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,104 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
|
||||||
|
#include "store-api.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
class Pipe;
|
||||||
|
class Pid;
|
||||||
|
struct FdSink;
|
||||||
|
struct FdSource;
|
||||||
|
|
||||||
|
|
||||||
|
class RemoteStore : public StoreAPI
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
|
||||||
|
RemoteStore();
|
||||||
|
|
||||||
|
~RemoteStore();
|
||||||
|
|
||||||
|
/* Implementations of abstract store API methods. */
|
||||||
|
|
||||||
|
bool isValidPath(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryValidPaths(const PathSet & paths);
|
||||||
|
|
||||||
|
PathSet queryAllValidPaths();
|
||||||
|
|
||||||
|
ValidPathInfo queryPathInfo(const Path & path);
|
||||||
|
|
||||||
|
Hash queryPathHash(const Path & path);
|
||||||
|
|
||||||
|
void queryReferences(const Path & path, PathSet & references);
|
||||||
|
|
||||||
|
void queryReferrers(const Path & path, PathSet & referrers);
|
||||||
|
|
||||||
|
Path queryDeriver(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryValidDerivers(const Path & path);
|
||||||
|
|
||||||
|
PathSet queryDerivationOutputs(const Path & path);
|
||||||
|
|
||||||
|
StringSet queryDerivationOutputNames(const Path & path);
|
||||||
|
|
||||||
|
Path queryPathFromHashPart(const string & hashPart);
|
||||||
|
|
||||||
|
PathSet querySubstitutablePaths(const PathSet & paths);
|
||||||
|
|
||||||
|
void querySubstitutablePathInfos(const PathSet & paths,
|
||||||
|
SubstitutablePathInfos & infos);
|
||||||
|
|
||||||
|
Path addToStore(const Path & srcPath,
|
||||||
|
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||||
|
PathFilter & filter = defaultPathFilter, bool repair = false);
|
||||||
|
|
||||||
|
Path addTextToStore(const string & name, const string & s,
|
||||||
|
const PathSet & references, bool repair = false);
|
||||||
|
|
||||||
|
void exportPath(const Path & path, bool sign,
|
||||||
|
Sink & sink);
|
||||||
|
|
||||||
|
Paths importPaths(bool requireSignature, Source & source);
|
||||||
|
|
||||||
|
void buildPaths(const PathSet & paths, BuildMode buildMode);
|
||||||
|
|
||||||
|
void ensurePath(const Path & path);
|
||||||
|
|
||||||
|
void addTempRoot(const Path & path);
|
||||||
|
|
||||||
|
void addIndirectRoot(const Path & path);
|
||||||
|
|
||||||
|
void syncWithGC();
|
||||||
|
|
||||||
|
Roots findRoots();
|
||||||
|
|
||||||
|
void collectGarbage(const GCOptions & options, GCResults & results);
|
||||||
|
|
||||||
|
PathSet queryFailedPaths();
|
||||||
|
|
||||||
|
void clearFailedPaths(const PathSet & paths);
|
||||||
|
|
||||||
|
private:
|
||||||
|
AutoCloseFD fdSocket;
|
||||||
|
FdSink to;
|
||||||
|
FdSource from;
|
||||||
|
Pid child;
|
||||||
|
unsigned int daemonVersion;
|
||||||
|
bool initialised;
|
||||||
|
|
||||||
|
void openConnection(bool reserveSpace = true);
|
||||||
|
|
||||||
|
void processStderr(Sink * sink = 0, Source * source = 0);
|
||||||
|
|
||||||
|
void connectToDaemon();
|
||||||
|
|
||||||
|
void setOptions();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,44 @@
|
||||||
|
create table if not exists ValidPaths (
|
||||||
|
id integer primary key autoincrement not null,
|
||||||
|
path text unique not null,
|
||||||
|
hash text not null,
|
||||||
|
registrationTime integer not null,
|
||||||
|
deriver text,
|
||||||
|
narSize integer
|
||||||
|
);
|
||||||
|
|
||||||
|
create table if not exists Refs (
|
||||||
|
referrer integer not null,
|
||||||
|
reference integer not null,
|
||||||
|
primary key (referrer, reference),
|
||||||
|
foreign key (referrer) references ValidPaths(id) on delete cascade,
|
||||||
|
foreign key (reference) references ValidPaths(id) on delete restrict
|
||||||
|
);
|
||||||
|
|
||||||
|
create index if not exists IndexReferrer on Refs(referrer);
|
||||||
|
create index if not exists IndexReference on Refs(reference);
|
||||||
|
|
||||||
|
-- Paths can refer to themselves, causing a tuple (N, N) in the Refs
|
||||||
|
-- table. This causes a deletion of the corresponding row in
|
||||||
|
-- ValidPaths to cause a foreign key constraint violation (due to `on
|
||||||
|
-- delete restrict' on the `reference' column). Therefore, explicitly
|
||||||
|
-- get rid of self-references.
|
||||||
|
create trigger if not exists DeleteSelfRefs before delete on ValidPaths
|
||||||
|
begin
|
||||||
|
delete from Refs where referrer = old.id and reference = old.id;
|
||||||
|
end;
|
||||||
|
|
||||||
|
create table if not exists DerivationOutputs (
|
||||||
|
drv integer not null,
|
||||||
|
id text not null, -- symbolic output id, usually "out"
|
||||||
|
path text not null,
|
||||||
|
primary key (drv, id),
|
||||||
|
foreign key (drv) references ValidPaths(id) on delete cascade
|
||||||
|
);
|
||||||
|
|
||||||
|
create index if not exists IndexDerivationOutputs on DerivationOutputs(path);
|
||||||
|
|
||||||
|
create table if not exists FailedPaths (
|
||||||
|
path text primary key not null,
|
||||||
|
time integer not null
|
||||||
|
);
|
|
@ -0,0 +1,331 @@
|
||||||
|
#include "store-api.hh"
|
||||||
|
#include "globals.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
|
||||||
|
#include <climits>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
GCOptions::GCOptions()
|
||||||
|
{
|
||||||
|
action = gcDeleteDead;
|
||||||
|
ignoreLiveness = false;
|
||||||
|
maxFreed = ULLONG_MAX;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool isInStore(const Path & path)
|
||||||
|
{
|
||||||
|
return isInDir(path, settings.nixStore);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool isStorePath(const Path & path)
|
||||||
|
{
|
||||||
|
return isInStore(path)
|
||||||
|
&& path.find('/', settings.nixStore.size() + 1) == Path::npos;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void assertStorePath(const Path & path)
|
||||||
|
{
|
||||||
|
if (!isStorePath(path))
|
||||||
|
throw Error(format("path `%1%' is not in the Nix store") % path);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path toStorePath(const Path & path)
|
||||||
|
{
|
||||||
|
if (!isInStore(path))
|
||||||
|
throw Error(format("path `%1%' is not in the Nix store") % path);
|
||||||
|
Path::size_type slash = path.find('/', settings.nixStore.size() + 1);
|
||||||
|
if (slash == Path::npos)
|
||||||
|
return path;
|
||||||
|
else
|
||||||
|
return Path(path, 0, slash);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path followLinksToStore(const Path & _path)
|
||||||
|
{
|
||||||
|
Path path = absPath(_path);
|
||||||
|
while (!isInStore(path)) {
|
||||||
|
if (!isLink(path)) break;
|
||||||
|
string target = readLink(path);
|
||||||
|
path = absPath(target, dirOf(path));
|
||||||
|
}
|
||||||
|
if (!isInStore(path))
|
||||||
|
throw Error(format("path `%1%' is not in the Nix store") % path);
|
||||||
|
return path;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path followLinksToStorePath(const Path & path)
|
||||||
|
{
|
||||||
|
return toStorePath(followLinksToStore(path));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
string storePathToName(const Path & path)
|
||||||
|
{
|
||||||
|
assertStorePath(path);
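/* Skip "<store>/", the 32-character hash part and the "-" that
   follows it: 1 + 32 + 1 = 34 characters past the store directory. */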
|
||||||
|
return string(path, settings.nixStore.size() + 34);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void checkStoreName(const string & name)
|
||||||
|
{
|
||||||
|
string validChars = "+-._?=";
|
||||||
|
/* Disallow names starting with a dot for possible security
|
||||||
|
reasons (e.g., "." and ".."). */
|
||||||
|
if (string(name, 0, 1) == ".")
|
||||||
|
throw Error(format("illegal name: `%1%'") % name);
|
||||||
|
foreach (string::const_iterator, i, name)
|
||||||
|
if (!((*i >= 'A' && *i <= 'Z') ||
|
||||||
|
(*i >= 'a' && *i <= 'z') ||
|
||||||
|
(*i >= '0' && *i <= '9') ||
|
||||||
|
validChars.find(*i) != string::npos))
|
||||||
|
{
|
||||||
|
throw Error(format("invalid character `%1%' in name `%2%'")
|
||||||
|
% *i % name);
|
||||||
|
}
|
||||||
|
}
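/* Illustrative sketch (not part of the original file): names accepted
   and rejected by checkStoreName(); the rejected calls throw. */
static void exampleCheckStoreNames()
{
    checkStoreName("hello-2.8");   /* accepted: letters, digits and "+-._?=" */
    checkStoreName("foo@1.0");     /* throws: '@' is not a valid character */
    checkStoreName(".hidden");     /* throws: a leading dot is disallowed */
}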
|
||||||
|
|
||||||
|
|
||||||
|
/* Store paths have the following form:
|
||||||
|
|
||||||
|
<store>/<h>-<name>
|
||||||
|
|
||||||
|
where
|
||||||
|
|
||||||
|
<store> = the location of the Nix store, usually /nix/store
|
||||||
|
|
||||||
|
<name> = a human readable name for the path, typically obtained
|
||||||
|
from the name attribute of the derivation, or the name of the
|
||||||
|
source file from which the store path is created. For derivation
|
||||||
|
outputs other than the default "out" output, the string "-<id>"
|
||||||
|
is suffixed to <name>.
|
||||||
|
|
||||||
|
<h> = base-32 representation of the first 160 bits of a SHA-256
|
||||||
|
hash of <s>; the hash part of the store name
|
||||||
|
|
||||||
|
<s> = the string "<type>:sha256:<h2>:<store>:<name>";
|
||||||
|
note that it includes the location of the store as well as the
|
||||||
|
name to make sure that changes to either of those are reflected
|
||||||
|
in the hash (e.g. you won't get /nix/store/<h>-name1 and
|
||||||
|
/nix/store/<h>-name2 with equal hash parts).
|
||||||
|
|
||||||
|
<type> = one of:
|
||||||
|
"text:<r1>:<r2>:...<rN>"
|
||||||
|
for plain text files written to the store using
|
||||||
|
addTextToStore(); <r1> ... <rN> are the references of the
|
||||||
|
path.
|
||||||
|
"source"
|
||||||
|
for paths copied to the store using addToStore() when recursive
|
||||||
|
= true and hashAlgo = "sha256"
|
||||||
|
"output:<id>"
|
||||||
|
for either the outputs created by derivations, OR paths copied
|
||||||
|
to the store using addToStore() with recursive != true or
|
||||||
|
hashAlgo != "sha256" (in that case "source" is used; it's
|
||||||
|
silly, but it's done that way for compatibility). <id> is the
|
||||||
|
name of the output (usually, "out").
|
||||||
|
|
||||||
|
<h2> = base-16 representation of a SHA-256 hash of:
|
||||||
|
if <type> = "text:...":
|
||||||
|
the string written to the resulting store path
|
||||||
|
if <type> = "source":
|
||||||
|
the serialisation of the path from which this store path is
|
||||||
|
copied, as returned by hashPath()
|
||||||
|
if <type> = "output:out":
|
||||||
|
for non-fixed derivation outputs:
|
||||||
|
the derivation (see hashDerivationModulo() in
|
||||||
|
primops.cc)
|
||||||
|
for paths copied by addToStore() or produced by fixed-output
|
||||||
|
derivations:
|
||||||
|
the string "fixed:out:<rec><algo>:<hash>:", where
|
||||||
|
<rec> = "r:" for recursive (path) hashes, or "" or flat
|
||||||
|
(file) hashes
|
||||||
|
<algo> = "md5", "sha1" or "sha256"
|
||||||
|
<hash> = base-16 representation of the path or flat hash of
|
||||||
|
the contents of the path (or expected contents of the
|
||||||
|
path for fixed-output derivations)
|
||||||
|
|
||||||
|
It would have been nicer to handle fixed-output derivations under
|
||||||
|
"source", e.g. have something like "source:<rec><algo>", but we're
|
||||||
|
stuck with this for now...
|
||||||
|
|
||||||
|
The main reason for this way of computing names is to prevent name
|
||||||
|
collisions (for security). For instance, it shouldn't be feasible
|
||||||
|
to come up with a derivation whose output path collides with the
|
||||||
|
path for a copied source. The former would have a <s> starting with
|
||||||
|
"output:out:", while the latter would have a <2> starting with
|
||||||
|
"source:".
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
Path makeStorePath(const string & type,
|
||||||
|
const Hash & hash, const string & name)
|
||||||
|
{
|
||||||
|
/* e.g., "source:sha256:1abc...:/nix/store:foo.tar.gz" */
|
||||||
|
string s = type + ":sha256:" + printHash(hash) + ":"
|
||||||
|
+ settings.nixStore + ":" + name;
|
||||||
|
|
||||||
|
checkStoreName(name);
|
||||||
|
|
||||||
|
return settings.nixStore + "/"
|
||||||
|
+ printHash32(compressHash(hashString(htSHA256, s), 20))
|
||||||
|
+ "-" + name;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path makeOutputPath(const string & id,
|
||||||
|
const Hash & hash, const string & name)
|
||||||
|
{
|
||||||
|
return makeStorePath("output:" + id, hash,
|
||||||
|
name + (id == "out" ? "" : "-" + id));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path makeFixedOutputPath(bool recursive,
|
||||||
|
HashType hashAlgo, Hash hash, string name)
|
||||||
|
{
|
||||||
|
return hashAlgo == htSHA256 && recursive
|
||||||
|
? makeStorePath("source", hash, name)
|
||||||
|
: makeStorePath("output:out", hashString(htSHA256,
|
||||||
|
"fixed:out:" + (recursive ? (string) "r:" : "") +
|
||||||
|
printHashType(hashAlgo) + ":" + printHash(hash) + ":"),
|
||||||
|
name);
|
||||||
|
}
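/* Illustrative sketch (not part of the original file): the two
   branches of makeFixedOutputPath().  A recursive SHA-256 hash gets
   the "source" type; everything else goes through the
   "fixed:out:<rec><algo>:<hash>:" scheme under "output:out".
   `h' stands for an already-computed content hash. */
static void exampleFixedOutputPaths(const Hash & h)
{
    Path sourceStyle = makeFixedOutputPath(true, htSHA256, h, "foo.tar.gz");
    Path fixedStyle = makeFixedOutputPath(false, htSHA1, h, "foo.tar.gz");
    (void) sourceStyle; (void) fixedStyle;
}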
|
||||||
|
|
||||||
|
|
||||||
|
std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
|
||||||
|
bool recursive, HashType hashAlgo, PathFilter & filter)
|
||||||
|
{
|
||||||
|
HashType ht(hashAlgo);
|
||||||
|
Hash h = recursive ? hashPath(ht, srcPath, filter).first : hashFile(ht, srcPath);
|
||||||
|
string name = baseNameOf(srcPath);
|
||||||
|
Path dstPath = makeFixedOutputPath(recursive, hashAlgo, h, name);
|
||||||
|
return std::pair<Path, Hash>(dstPath, h);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Path computeStorePathForText(const string & name, const string & s,
|
||||||
|
const PathSet & references)
|
||||||
|
{
|
||||||
|
Hash hash = hashString(htSHA256, s);
|
||||||
|
/* Stuff the references (if any) into the type. This is a bit
|
||||||
|
hacky, but we can't put them in `s' since that would be
|
||||||
|
ambiguous. */
|
||||||
|
string type = "text";
|
||||||
|
foreach (PathSet::const_iterator, i, references) {
|
||||||
|
type += ":";
|
||||||
|
type += *i;
|
||||||
|
}
|
||||||
|
return makeStorePath(type, hash, name);
|
||||||
|
}
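/* Illustrative sketch (not part of the original file): the references
   given to computeStorePathForText() become part of the hashed type
   string ("text:<r1>:<r2>:..."), so the same text with different
   references yields a different store path.  The path below is made
   up for the example. */
static Path exampleTextPath()
{
    PathSet refs;
    refs.insert("/nix/store/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa-dep");
    return computeStorePathForText("example.txt", "hello world", refs);
}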
|
||||||
|
|
||||||
|
|
||||||
|
/* Return a string accepted by decodeValidPathInfo() that
|
||||||
|
registers the specified paths as valid. Note: it's the
|
||||||
|
responsibility of the caller to provide a closure. */
|
||||||
|
string StoreAPI::makeValidityRegistration(const PathSet & paths,
|
||||||
|
bool showDerivers, bool showHash)
|
||||||
|
{
|
||||||
|
string s = "";
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, i, paths) {
|
||||||
|
s += *i + "\n";
|
||||||
|
|
||||||
|
ValidPathInfo info = queryPathInfo(*i);
|
||||||
|
|
||||||
|
if (showHash) {
|
||||||
|
s += printHash(info.hash) + "\n";
|
||||||
|
s += (format("%1%\n") % info.narSize).str();
|
||||||
|
}
|
||||||
|
|
||||||
|
Path deriver = showDerivers ? info.deriver : "";
|
||||||
|
s += deriver + "\n";
|
||||||
|
|
||||||
|
s += (format("%1%\n") % info.references.size()).str();
|
||||||
|
|
||||||
|
foreach (PathSet::iterator, j, info.references)
|
||||||
|
s += *j + "\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ValidPathInfo decodeValidPathInfo(std::istream & str, bool hashGiven)
|
||||||
|
{
|
||||||
|
ValidPathInfo info;
|
||||||
|
getline(str, info.path);
|
||||||
|
if (str.eof()) { info.path = ""; return info; }
|
||||||
|
if (hashGiven) {
|
||||||
|
string s;
|
||||||
|
getline(str, s);
|
||||||
|
info.hash = parseHash(htSHA256, s);
|
||||||
|
getline(str, s);
|
||||||
|
if (!string2Int(s, info.narSize)) throw Error("number expected");
|
||||||
|
}
|
||||||
|
getline(str, info.deriver);
|
||||||
|
string s; int n;
|
||||||
|
getline(str, s);
|
||||||
|
if (!string2Int(s, n)) throw Error("number expected");
|
||||||
|
while (n--) {
|
||||||
|
getline(str, s);
|
||||||
|
info.references.insert(s);
|
||||||
|
}
|
||||||
|
if (!str || str.eof()) throw Error("missing input");
|
||||||
|
return info;
|
||||||
|
}
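/* Illustrative sketch (not part of the original file): the
   line-oriented format read back by decodeValidPathInfo() (with
   hashGiven = false), as produced by makeValidityRegistration()
   above; the store paths are made up for the example:

       /nix/store/<hash>-hello-2.8        <- the path itself
       /nix/store/<hash>-hello-2.8.drv    <- deriver (may be empty)
       1                                  <- number of references
       /nix/store/<hash>-glibc-2.17       <- the references, one per line
*/
static ValidPathInfo exampleDecode(std::istream & str)
{
    return decodeValidPathInfo(str);
}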
|
||||||
|
|
||||||
|
|
||||||
|
string showPaths(const PathSet & paths)
|
||||||
|
{
|
||||||
|
string s;
|
||||||
|
foreach (PathSet::const_iterator, i, paths) {
|
||||||
|
if (s.size() != 0) s += ", ";
|
||||||
|
s += "`" + *i + "'";
|
||||||
|
}
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void exportPaths(StoreAPI & store, const Paths & paths,
|
||||||
|
bool sign, Sink & sink)
|
||||||
|
{
|
||||||
|
foreach (Paths::const_iterator, i, paths) {
|
||||||
|
writeInt(1, sink);
|
||||||
|
store.exportPath(*i, sign, sink);
|
||||||
|
}
|
||||||
|
writeInt(0, sink);
|
||||||
|
}
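/* Illustrative sketch (not part of the original file): the stream
   produced by exportPaths() for two paths P1 and P2 is

       int(1)  export of P1 (NAR dump + references, deriver, optional signature)
       int(1)  export of P2
       int(0)

   i.e. each archive is preceded by a 1 and the sequence is terminated
   by a 0, which is what importPaths() / `nix-store --import' expect.
   Usage sketch, with an assumed sink:

       exportPaths(*store, somePaths, false, someSink);              */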
|
||||||
|
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#include "local-store.hh"
|
||||||
|
#include "serialise.hh"
|
||||||
|
#include "remote-store.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<StoreAPI> store;
|
||||||
|
|
||||||
|
|
||||||
|
std::shared_ptr<StoreAPI> openStore(bool reserveSpace)
|
||||||
|
{
|
||||||
|
if (getEnv("NIX_REMOTE") == "")
|
||||||
|
return std::shared_ptr<StoreAPI>(new LocalStore(reserveSpace));
|
||||||
|
else
|
||||||
|
return std::shared_ptr<StoreAPI>(new RemoteStore());
|
||||||
|
}
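/* Illustrative usage sketch (not part of the original file): obtain a
   store through the factory and query a path.  Whether a LocalStore
   or a RemoteStore is returned depends on $NIX_REMOTE, as above. */
static bool examplePathIsValid(const Path & p)
{
    std::shared_ptr<StoreAPI> s = openStore();
    return s->isValidPath(p);
}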
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,366 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "hash.hh"
|
||||||
|
#include "serialise.hh"
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include <map>
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
typedef std::map<Path, Path> Roots;
|
||||||
|
|
||||||
|
|
||||||
|
struct GCOptions
|
||||||
|
{
|
||||||
|
/* Garbage collector operation:
|
||||||
|
|
||||||
|
- `gcReturnLive': return the set of paths reachable from
|
||||||
|
(i.e. in the closure of) the roots.
|
||||||
|
|
||||||
|
- `gcReturnDead': return the set of paths not reachable from
|
||||||
|
the roots.
|
||||||
|
|
||||||
|
- `gcDeleteDead': actually delete the latter set.
|
||||||
|
|
||||||
|
- `gcDeleteSpecific': delete the paths listed in
|
||||||
|
`pathsToDelete', insofar as they are not reachable.
|
||||||
|
*/
|
||||||
|
typedef enum {
|
||||||
|
gcReturnLive,
|
||||||
|
gcReturnDead,
|
||||||
|
gcDeleteDead,
|
||||||
|
gcDeleteSpecific,
|
||||||
|
} GCAction;
|
||||||
|
|
||||||
|
GCAction action;
|
||||||
|
|
||||||
|
/* If `ignoreLiveness' is set, then reachability from the roots is
|
||||||
|
ignored (dangerous!). However, the paths must still be
|
||||||
|
unreferenced *within* the store (i.e., there can be no other
|
||||||
|
store paths that depend on them). */
|
||||||
|
bool ignoreLiveness;
|
||||||
|
|
||||||
|
/* For `gcDeleteSpecific', the paths to delete. */
|
||||||
|
PathSet pathsToDelete;
|
||||||
|
|
||||||
|
/* Stop after at least `maxFreed' bytes have been freed. */
|
||||||
|
unsigned long long maxFreed;
|
||||||
|
|
||||||
|
GCOptions();
|
||||||
|
};
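/* Illustrative sketch (not part of the original file): asking the
   collector to delete two specific (made-up) paths, stopping once
   roughly 100 MiB has been freed. */
static inline GCOptions exampleDeleteSpecific()
{
    GCOptions opts;
    opts.action = GCOptions::gcDeleteSpecific;
    opts.pathsToDelete.insert("/nix/store/aaaa-old-package");
    opts.pathsToDelete.insert("/nix/store/bbbb-old-package");
    opts.maxFreed = 100ULL * 1024 * 1024;
    return opts;
}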
|
||||||
|
|
||||||
|
|
||||||
|
struct GCResults
|
||||||
|
{
|
||||||
|
/* Depending on the action, the GC roots, or the paths that would
|
||||||
|
be or have been deleted. */
|
||||||
|
PathSet paths;
|
||||||
|
|
||||||
|
/* For `gcReturnDead', `gcDeleteDead' and `gcDeleteSpecific', the
|
||||||
|
number of bytes that would be or was freed. */
|
||||||
|
unsigned long long bytesFreed;
|
||||||
|
|
||||||
|
GCResults()
|
||||||
|
{
|
||||||
|
bytesFreed = 0;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
struct SubstitutablePathInfo
|
||||||
|
{
|
||||||
|
Path deriver;
|
||||||
|
PathSet references;
|
||||||
|
unsigned long long downloadSize; /* 0 = unknown or inapplicable */
|
||||||
|
unsigned long long narSize; /* 0 = unknown */
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef std::map<Path, SubstitutablePathInfo> SubstitutablePathInfos;
|
||||||
|
|
||||||
|
|
||||||
|
struct ValidPathInfo
|
||||||
|
{
|
||||||
|
Path path;
|
||||||
|
Path deriver;
|
||||||
|
Hash hash;
|
||||||
|
PathSet references;
|
||||||
|
time_t registrationTime;
|
||||||
|
unsigned long long narSize; // 0 = unknown
|
||||||
|
unsigned long long id; // internal use only
|
||||||
|
ValidPathInfo() : registrationTime(0), narSize(0) { }
|
||||||
|
};
|
||||||
|
|
||||||
|
typedef list<ValidPathInfo> ValidPathInfos;
|
||||||
|
|
||||||
|
|
||||||
|
enum BuildMode { bmNormal, bmRepair, bmCheck };
|
||||||
|
|
||||||
|
|
||||||
|
class StoreAPI
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
|
||||||
|
virtual ~StoreAPI() { }
|
||||||
|
|
||||||
|
/* Check whether a path is valid. */
|
||||||
|
virtual bool isValidPath(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query which of the given paths is valid. */
|
||||||
|
virtual PathSet queryValidPaths(const PathSet & paths) = 0;
|
||||||
|
|
||||||
|
/* Query the set of all valid paths. */
|
||||||
|
virtual PathSet queryAllValidPaths() = 0;
|
||||||
|
|
||||||
|
/* Query information about a valid path. */
|
||||||
|
virtual ValidPathInfo queryPathInfo(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query the hash of a valid path. */
|
||||||
|
virtual Hash queryPathHash(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query the set of outgoing FS references for a store path. The
|
||||||
|
result is not cleared. */
|
||||||
|
virtual void queryReferences(const Path & path,
|
||||||
|
PathSet & references) = 0;
|
||||||
|
|
||||||
|
/* Query the set of incoming FS references for a store path.
|
||||||
|
The result is not cleared. */
|
||||||
|
virtual void queryReferrers(const Path & path,
|
||||||
|
PathSet & referrers) = 0;
|
||||||
|
|
||||||
|
/* Query the deriver of a store path. Return the empty string if
|
||||||
|
no deriver has been set. */
|
||||||
|
virtual Path queryDeriver(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Return all currently valid derivations that have `path' as an
|
||||||
|
output. (Note that the result of `queryDeriver()' is the
|
||||||
|
derivation that was actually used to produce `path', which may
|
||||||
|
not exist anymore.) */
|
||||||
|
virtual PathSet queryValidDerivers(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query the outputs of the derivation denoted by `path'. */
|
||||||
|
virtual PathSet queryDerivationOutputs(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query the output names of the derivation denoted by `path'. */
|
||||||
|
virtual StringSet queryDerivationOutputNames(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Query the full store path given the hash part of a valid store
|
||||||
|
path, or "" if the path doesn't exist. */
|
||||||
|
virtual Path queryPathFromHashPart(const string & hashPart) = 0;
|
||||||
|
|
||||||
|
/* Query which of the given paths have substitutes. */
|
||||||
|
virtual PathSet querySubstitutablePaths(const PathSet & paths) = 0;
|
||||||
|
|
||||||
|
/* Query substitute info (i.e. references, derivers and download
|
||||||
|
sizes) of a set of paths. If a path does not have substitute
|
||||||
|
info, it's omitted from the resulting ‘infos’ map. */
|
||||||
|
virtual void querySubstitutablePathInfos(const PathSet & paths,
|
||||||
|
SubstitutablePathInfos & infos) = 0;
|
||||||
|
|
||||||
|
/* Copy the contents of a path to the store and register the
|
||||||
|
validity of the resulting path. The resulting path is returned.
|
||||||
|
The function object `filter' can be used to exclude files (see
|
||||||
|
libutil/archive.hh). */
|
||||||
|
virtual Path addToStore(const Path & srcPath,
|
||||||
|
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||||
|
PathFilter & filter = defaultPathFilter, bool repair = false) = 0;
|
||||||
|
|
||||||
|
/* Like addToStore, but the resulting store path is a regular file
whose contents are the given string. */
|
||||||
|
virtual Path addTextToStore(const string & name, const string & s,
|
||||||
|
const PathSet & references, bool repair = false) = 0;
|
||||||
|
|
||||||
|
/* Export a store path, that is, create a NAR dump of the store
|
||||||
|
path and append its references and its deriver. Optionally, a
|
||||||
|
cryptographic signature (created by OpenSSL) of the preceding
|
||||||
|
data is attached. */
|
||||||
|
virtual void exportPath(const Path & path, bool sign,
|
||||||
|
Sink & sink) = 0;
|
||||||
|
|
||||||
|
/* Import a sequence of NAR dumps created by exportPaths() into
|
||||||
|
the Nix store. */
|
||||||
|
virtual Paths importPaths(bool requireSignature, Source & source) = 0;
|
||||||
|
|
||||||
|
/* For each path, if it's a derivation, build it. Building a
|
||||||
|
derivation means ensuring that the output paths are valid. If
|
||||||
|
they are already valid, this is a no-op. Otherwise, validity
|
||||||
|
can be reached in two ways. First, if the output paths are
substitutable, then build the paths that way. Second, the
|
||||||
|
output paths can be created by running the builder, after
|
||||||
|
recursively building any sub-derivations. For inputs that are
|
||||||
|
not derivations, substitute them. */
|
||||||
|
virtual void buildPaths(const PathSet & paths, BuildMode buildMode = bmNormal) = 0;
|
||||||
|
|
||||||
|
/* Ensure that a path is valid. If it is not currently valid, it
|
||||||
|
may be made valid by running a substitute (if defined for the
|
||||||
|
path). */
|
||||||
|
virtual void ensurePath(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Add a store path as a temporary root of the garbage collector.
|
||||||
|
The root disappears as soon as we exit. */
|
||||||
|
virtual void addTempRoot(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Add an indirect root, which is merely a symlink to `path' from
|
||||||
|
/nix/var/nix/gcroots/auto/<hash of `path'>. `path' is supposed
|
||||||
|
to be a symlink to a store path. The garbage collector will
|
||||||
|
automatically remove the indirect root when it finds that
|
||||||
|
`path' has disappeared. */
|
||||||
|
virtual void addIndirectRoot(const Path & path) = 0;
|
||||||
|
|
||||||
|
/* Acquire the global GC lock, then immediately release it. This
|
||||||
|
function must be called after registering a new permanent root,
|
||||||
|
but before exiting. Otherwise, it is possible that a running
|
||||||
|
garbage collector doesn't see the new root and deletes the
|
||||||
|
stuff we've just built. By acquiring the lock briefly, we
|
||||||
|
ensure that either:
|
||||||
|
|
||||||
|
- The collector is already running, and so we block until the
|
||||||
|
collector is finished. The collector will know about our
|
||||||
|
*temporary* locks, which should include whatever it is we
|
||||||
|
want to register as a permanent lock.
|
||||||
|
|
||||||
|
- The collector isn't running, or it's just started but hasn't
|
||||||
|
acquired the GC lock yet. In that case we get and release
|
||||||
|
the lock right away, then exit. The collector scans the
|
||||||
|
permanent roots and sees ours.
|
||||||
|
|
||||||
|
In either case the permanent root is seen by the collector. */
|
||||||
|
virtual void syncWithGC() = 0;
|
||||||
|
|
||||||
|
/* Find the roots of the garbage collector. Each root is a pair
|
||||||
|
(link, storePath) where `link' is the path of the symlink
outside of the Nix store that points to `storePath'.
|
||||||
|
virtual Roots findRoots() = 0;
|
||||||
|
|
||||||
|
/* Perform a garbage collection. */
|
||||||
|
virtual void collectGarbage(const GCOptions & options, GCResults & results) = 0;
|
||||||
|
|
||||||
|
/* Return the set of paths that have failed to build. */
|
||||||
|
virtual PathSet queryFailedPaths() = 0;
|
||||||
|
|
||||||
|
/* Clear the "failed" status of the given paths. The special
|
||||||
|
value `*' causes all failed paths to be cleared. */
|
||||||
|
virtual void clearFailedPaths(const PathSet & paths) = 0;
|
||||||
|
|
||||||
|
/* Return a string representing information about the path that
|
||||||
|
can be loaded into the database using `nix-store --load-db' or
|
||||||
|
`nix-store --register-validity'. */
|
||||||
|
string makeValidityRegistration(const PathSet & paths,
|
||||||
|
bool showDerivers, bool showHash);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* !!! These should be part of the store API, I guess. */
|
||||||
|
|
||||||
|
/* Throw an exception if `path' is not directly in the Nix store. */
|
||||||
|
void assertStorePath(const Path & path);
|
||||||
|
|
||||||
|
bool isInStore(const Path & path);
|
||||||
|
bool isStorePath(const Path & path);
|
||||||
|
|
||||||
|
/* Extract the name part of the given store path. */
|
||||||
|
string storePathToName(const Path & path);
|
||||||
|
|
||||||
|
void checkStoreName(const string & name);
|
||||||
|
|
||||||
|
|
||||||
|
/* Chop off the parts after the top-level store name, e.g.,
|
||||||
|
/nix/store/abcd-foo/bar => /nix/store/abcd-foo. */
|
||||||
|
Path toStorePath(const Path & path);
|
||||||
|
|
||||||
|
|
||||||
|
/* Follow symlinks until we end up with a path in the Nix store. */
|
||||||
|
Path followLinksToStore(const Path & path);
|
||||||
|
|
||||||
|
|
||||||
|
/* Same as followLinksToStore(), but apply toStorePath() to the
|
||||||
|
result. */
|
||||||
|
Path followLinksToStorePath(const Path & path);
|
||||||
|
|
||||||
|
|
||||||
|
/* Constructs a unique store path name. */
|
||||||
|
Path makeStorePath(const string & type,
|
||||||
|
const Hash & hash, const string & name);
|
||||||
|
|
||||||
|
Path makeOutputPath(const string & id,
|
||||||
|
const Hash & hash, const string & name);
|
||||||
|
|
||||||
|
Path makeFixedOutputPath(bool recursive,
|
||||||
|
HashType hashAlgo, Hash hash, string name);
|
||||||
|
|
||||||
|
|
||||||
|
/* This is the preparatory part of addToStore() and addToStoreFixed();
|
||||||
|
it computes the store path to which srcPath is to be copied.
|
||||||
|
Returns the store path and the cryptographic hash of the
|
||||||
|
contents of srcPath. */
|
||||||
|
std::pair<Path, Hash> computeStorePathForPath(const Path & srcPath,
|
||||||
|
bool recursive = true, HashType hashAlgo = htSHA256,
|
||||||
|
PathFilter & filter = defaultPathFilter);
|
||||||
|
|
||||||
|
/* Preparatory part of addTextToStore().
|
||||||
|
|
||||||
|
!!! Computation of the path should take the references given to
|
||||||
|
addTextToStore() into account, otherwise we have a (relatively
|
||||||
|
minor) security hole: a caller can register a source file with
|
||||||
|
bogus references. If there are too many references, the path may
|
||||||
|
not be garbage collected when it has to be (not really a problem,
|
||||||
|
the caller could create a root anyway), or it may be garbage
|
||||||
|
collected when it shouldn't be (more serious).
|
||||||
|
|
||||||
|
Hashing the references would solve this (bogus references would
|
||||||
|
simply yield a different store path, so other users wouldn't be
|
||||||
|
affected), but it has some backwards compatibility issues (the
|
||||||
|
hashing scheme changes), so I'm not doing that for now. */
|
||||||
|
Path computeStorePathForText(const string & name, const string & s,
|
||||||
|
const PathSet & references);
|
||||||
|
|
||||||
|
|
||||||
|
/* Remove the temporary roots file for this process. Any temporary
|
||||||
|
root becomes garbage after this point unless it has been registered
|
||||||
|
as a (permanent) root. */
|
||||||
|
void removeTempRoots();
|
||||||
|
|
||||||
|
|
||||||
|
/* Register a permanent GC root. */
|
||||||
|
Path addPermRoot(StoreAPI & store, const Path & storePath,
|
||||||
|
const Path & gcRoot, bool indirect, bool allowOutsideRootsDir = false);
|
||||||
|
|
||||||
|
|
||||||
|
/* Sort a set of paths topologically under the references relation.
|
||||||
|
If p refers to q, then p precedes q in this list. */
|
||||||
|
Paths topoSortPaths(StoreAPI & store, const PathSet & paths);
|
||||||
|
|
||||||
|
|
||||||
|
/* For now, there is a single global store API object, but we'll
|
||||||
|
purify that in the future. */
|
||||||
|
extern std::shared_ptr<StoreAPI> store;
|
||||||
|
|
||||||
|
|
||||||
|
/* Factory method: open the Nix database, either through the local or
|
||||||
|
remote implementation. */
|
||||||
|
std::shared_ptr<StoreAPI> openStore(bool reserveSpace = true);
|
||||||
|
|
||||||
|
|
||||||
|
/* Display a set of paths in human-readable form (i.e., between quotes
|
||||||
|
and separated by commas). */
|
||||||
|
string showPaths(const PathSet & paths);
|
||||||
|
|
||||||
|
|
||||||
|
ValidPathInfo decodeValidPathInfo(std::istream & str,
|
||||||
|
bool hashGiven = false);
|
||||||
|
|
||||||
|
|
||||||
|
/* Export multiple paths in the format expected by ‘nix-store
|
||||||
|
--import’. */
|
||||||
|
void exportPaths(StoreAPI & store, const Paths & paths,
|
||||||
|
bool sign, Sink & sink);
|
||||||
|
|
||||||
|
|
||||||
|
MakeError(SubstError, Error)
|
||||||
|
MakeError(BuildError, Error) /* denotes a permanent build failure */
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,60 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
#define WORKER_MAGIC_1 0x6e697863
|
||||||
|
#define WORKER_MAGIC_2 0x6478696f
|
||||||
|
|
||||||
|
#define PROTOCOL_VERSION 0x10e
|
||||||
|
#define GET_PROTOCOL_MAJOR(x) ((x) & 0xff00)
|
||||||
|
#define GET_PROTOCOL_MINOR(x) ((x) & 0x00ff)
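/* Illustrative worked values (not part of the original file): with
   PROTOCOL_VERSION 0x10e,

       GET_PROTOCOL_MAJOR(0x10e) == 0x100
       GET_PROTOCOL_MINOR(0x10e) == 0x00e   (i.e. 14)

   the GET_PROTOCOL_MINOR(daemonVersion) >= N checks in remote-store.cc
   compare against that minor number. */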
|
||||||
|
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
wopQuit = 0,
|
||||||
|
wopIsValidPath = 1,
|
||||||
|
wopHasSubstitutes = 3,
|
||||||
|
wopQueryPathHash = 4,
|
||||||
|
wopQueryReferences = 5,
|
||||||
|
wopQueryReferrers = 6,
|
||||||
|
wopAddToStore = 7,
|
||||||
|
wopAddTextToStore = 8,
|
||||||
|
wopBuildPaths = 9,
|
||||||
|
wopEnsurePath = 10,
|
||||||
|
wopAddTempRoot = 11,
|
||||||
|
wopAddIndirectRoot = 12,
|
||||||
|
wopSyncWithGC = 13,
|
||||||
|
wopFindRoots = 14,
|
||||||
|
wopExportPath = 16,
|
||||||
|
wopQueryDeriver = 18,
|
||||||
|
wopSetOptions = 19,
|
||||||
|
wopCollectGarbage = 20,
|
||||||
|
wopQuerySubstitutablePathInfo = 21,
|
||||||
|
wopQueryDerivationOutputs = 22,
|
||||||
|
wopQueryAllValidPaths = 23,
|
||||||
|
wopQueryFailedPaths = 24,
|
||||||
|
wopClearFailedPaths = 25,
|
||||||
|
wopQueryPathInfo = 26,
|
||||||
|
wopImportPaths = 27,
|
||||||
|
wopQueryDerivationOutputNames = 28,
|
||||||
|
wopQueryPathFromHashPart = 29,
|
||||||
|
wopQuerySubstitutablePathInfos = 30,
|
||||||
|
wopQueryValidPaths = 31,
|
||||||
|
wopQuerySubstitutablePaths = 32,
|
||||||
|
wopQueryValidDerivers = 33,
|
||||||
|
} WorkerOp;
|
||||||
|
|
||||||
|
|
||||||
|
#define STDERR_NEXT 0x6f6c6d67
|
||||||
|
#define STDERR_READ 0x64617461 // data needed from source
|
||||||
|
#define STDERR_WRITE 0x64617416 // data for sink
|
||||||
|
#define STDERR_LAST 0x616c7473
|
||||||
|
#define STDERR_ERROR 0x63787470
|
||||||
|
|
||||||
|
|
||||||
|
Path readStorePath(Source & from);
|
||||||
|
template<class T> T readStorePaths(Source & from);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,55 @@
|
||||||
|
#include "types.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
#include "affinity.hh"
|
||||||
|
|
||||||
|
#if HAVE_SCHED_H
|
||||||
|
#include <sched.h>
|
||||||
|
#endif
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
#if HAVE_SCHED_SETAFFINITY
|
||||||
|
static bool didSaveAffinity = false;
|
||||||
|
static cpu_set_t savedAffinity;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
void setAffinityTo(int cpu)
|
||||||
|
{
|
||||||
|
#if HAVE_SCHED_SETAFFINITY
|
||||||
|
if (sched_getaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1) return;
|
||||||
|
didSaveAffinity = true;
|
||||||
|
printMsg(lvlDebug, format("locking this thread to CPU %1%") % cpu);
|
||||||
|
cpu_set_t newAffinity;
|
||||||
|
CPU_ZERO(&newAffinity);
|
||||||
|
CPU_SET(cpu, &newAffinity);
|
||||||
|
if (sched_setaffinity(0, sizeof(cpu_set_t), &newAffinity) == -1)
|
||||||
|
printMsg(lvlError, format("failed to lock thread to CPU %1%") % cpu);
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
int lockToCurrentCPU()
|
||||||
|
{
|
||||||
|
#if HAVE_SCHED_SETAFFINITY
|
||||||
|
int cpu = sched_getcpu();
|
||||||
|
if (cpu != -1) setAffinityTo(cpu);
|
||||||
|
return cpu;
|
||||||
|
#else
|
||||||
|
return -1;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void restoreAffinity()
|
||||||
|
{
|
||||||
|
#if HAVE_SCHED_SETAFFINITY
|
||||||
|
if (!didSaveAffinity) return;
|
||||||
|
if (sched_setaffinity(0, sizeof(cpu_set_t), &savedAffinity) == -1)
|
||||||
|
printMsg(lvlError, "failed to restore affinity %1%");
|
||||||
|
#endif
|
||||||
|
}
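/* Illustrative usage sketch (not part of the original file): pin the
   calling thread to its current CPU around CPU-sensitive work, then
   put the original affinity mask back. */
static void exampleAffinity()
{
    int cpu = lockToCurrentCPU();   /* -1 if unsupported on this platform */
    /* ... CPU-bound work ... */
    restoreAffinity();              /* no-op if nothing was saved */
    (void) cpu;
}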
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,9 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
void setAffinityTo(int cpu);
|
||||||
|
int lockToCurrentCPU();
|
||||||
|
void restoreAffinity();
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,335 @@
|
||||||
|
#include "config.h"
|
||||||
|
|
||||||
|
#include <cerrno>
|
||||||
|
#include <algorithm>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
|
#define _XOPEN_SOURCE 600
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <dirent.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
|
||||||
|
#include "archive.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
static string archiveVersion1 = "nix-archive-1";
|
||||||
|
|
||||||
|
|
||||||
|
PathFilter defaultPathFilter;
|
||||||
|
|
||||||
|
|
||||||
|
static void dump(const string & path, Sink & sink, PathFilter & filter);
|
||||||
|
|
||||||
|
|
||||||
|
static void dumpEntries(const Path & path, Sink & sink, PathFilter & filter)
|
||||||
|
{
|
||||||
|
Strings names = readDirectory(path);
|
||||||
|
vector<string> names2(names.begin(), names.end());
|
||||||
|
sort(names2.begin(), names2.end());
|
||||||
|
|
||||||
|
for (vector<string>::iterator i = names2.begin();
|
||||||
|
i != names2.end(); ++i)
|
||||||
|
{
|
||||||
|
Path entry = path + "/" + *i;
|
||||||
|
if (filter(entry)) {
|
||||||
|
writeString("entry", sink);
|
||||||
|
writeString("(", sink);
|
||||||
|
writeString("name", sink);
|
||||||
|
writeString(*i, sink);
|
||||||
|
writeString("node", sink);
|
||||||
|
dump(entry, sink, filter);
|
||||||
|
writeString(")", sink);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void dumpContents(const Path & path, size_t size,
|
||||||
|
Sink & sink)
|
||||||
|
{
|
||||||
|
writeString("contents", sink);
|
||||||
|
writeLongLong(size, sink);
|
||||||
|
|
||||||
|
AutoCloseFD fd = open(path.c_str(), O_RDONLY);
|
||||||
|
if (fd == -1) throw SysError(format("opening file `%1%'") % path);
|
||||||
|
|
||||||
|
unsigned char buf[65536];
|
||||||
|
size_t left = size;
|
||||||
|
|
||||||
|
while (left > 0) {
|
||||||
|
size_t n = left > sizeof(buf) ? sizeof(buf) : left;
|
||||||
|
readFull(fd, buf, n);
|
||||||
|
left -= n;
|
||||||
|
sink(buf, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
writePadding(size, sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void dump(const Path & path, Sink & sink, PathFilter & filter)
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (lstat(path.c_str(), &st))
|
||||||
|
throw SysError(format("getting attributes of path `%1%'") % path);
|
||||||
|
|
||||||
|
writeString("(", sink);
|
||||||
|
|
||||||
|
if (S_ISREG(st.st_mode)) {
|
||||||
|
writeString("type", sink);
|
||||||
|
writeString("regular", sink);
|
||||||
|
if (st.st_mode & S_IXUSR) {
|
||||||
|
writeString("executable", sink);
|
||||||
|
writeString("", sink);
|
||||||
|
}
|
||||||
|
dumpContents(path, (size_t) st.st_size, sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (S_ISDIR(st.st_mode)) {
|
||||||
|
writeString("type", sink);
|
||||||
|
writeString("directory", sink);
|
||||||
|
dumpEntries(path, sink, filter);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (S_ISLNK(st.st_mode)) {
|
||||||
|
writeString("type", sink);
|
||||||
|
writeString("symlink", sink);
|
||||||
|
writeString("target", sink);
|
||||||
|
writeString(readLink(path), sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
else throw Error(format("file `%1%' has an unknown type") % path);
|
||||||
|
|
||||||
|
writeString(")", sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void dumpPath(const Path & path, Sink & sink, PathFilter & filter)
|
||||||
|
{
|
||||||
|
writeString(archiveVersion1, sink);
|
||||||
|
dump(path, sink, filter);
|
||||||
|
}
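/* Illustrative sketch (not part of the original file): a minimal Sink
   that collects the NAR stream produced by dumpPath() into a string,
   using only the (data, len) call interface that dump() and
   dumpContents() rely on above.  `StringCollectingSink' is an assumed
   name, not an existing Nix class. */
struct StringCollectingSink : Sink
{
    string s;
    void operator () (const unsigned char * data, size_t len)
    {
        s.append((const char *) data, len);
    }
};
/* Usage: StringCollectingSink sink; dumpPath("/some/path", sink); */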
|
||||||
|
|
||||||
|
|
||||||
|
static SerialisationError badArchive(string s)
|
||||||
|
{
|
||||||
|
return SerialisationError("bad archive: " + s);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void skipGeneric(Source & source)
|
||||||
|
{
|
||||||
|
if (readString(source) == "(") {
|
||||||
|
while (readString(source) != ")")
|
||||||
|
skipGeneric(source);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void parse(ParseSink & sink, Source & source, const Path & path);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
static void parseEntry(ParseSink & sink, Source & source, const Path & path)
|
||||||
|
{
|
||||||
|
string s, name;
|
||||||
|
|
||||||
|
s = readString(source);
|
||||||
|
if (s != "(") throw badArchive("expected open tag");
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
s = readString(source);
|
||||||
|
|
||||||
|
if (s == ")") {
|
||||||
|
break;
|
||||||
|
} else if (s == "name") {
|
||||||
|
name = readString(source);
|
||||||
|
} else if (s == "node") {
|
||||||
|
if (s == "") throw badArchive("entry name missing");
|
||||||
|
parse(sink, source, path + "/" + name);
|
||||||
|
} else {
|
||||||
|
throw badArchive("unknown field " + s);
|
||||||
|
skipGeneric(source);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void parseContents(ParseSink & sink, Source & source, const Path & path)
|
||||||
|
{
|
||||||
|
unsigned long long size = readLongLong(source);
|
||||||
|
|
||||||
|
sink.preallocateContents(size);
|
||||||
|
|
||||||
|
unsigned long long left = size;
|
||||||
|
unsigned char buf[65536];
|
||||||
|
|
||||||
|
while (left) {
|
||||||
|
checkInterrupt();
|
||||||
|
unsigned int n = sizeof(buf);
|
||||||
|
if ((unsigned long long) n > left) n = left;
|
||||||
|
source(buf, n);
|
||||||
|
sink.receiveContents(buf, n);
|
||||||
|
left -= n;
|
||||||
|
}
|
||||||
|
|
||||||
|
readPadding(size, source);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void parse(ParseSink & sink, Source & source, const Path & path)
|
||||||
|
{
|
||||||
|
string s;
|
||||||
|
|
||||||
|
s = readString(source);
|
||||||
|
if (s != "(") throw badArchive("expected open tag");
|
||||||
|
|
||||||
|
enum { tpUnknown, tpRegular, tpDirectory, tpSymlink } type = tpUnknown;
|
||||||
|
|
||||||
|
while (1) {
|
||||||
|
checkInterrupt();
|
||||||
|
|
||||||
|
s = readString(source);
|
||||||
|
|
||||||
|
if (s == ")") {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (s == "type") {
|
||||||
|
if (type != tpUnknown)
|
||||||
|
throw badArchive("multiple type fields");
|
||||||
|
string t = readString(source);
|
||||||
|
|
||||||
|
if (t == "regular") {
|
||||||
|
type = tpRegular;
|
||||||
|
sink.createRegularFile(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (t == "directory") {
|
||||||
|
sink.createDirectory(path);
|
||||||
|
type = tpDirectory;
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (t == "symlink") {
|
||||||
|
type = tpSymlink;
|
||||||
|
}
|
||||||
|
|
||||||
|
else throw badArchive("unknown file type " + t);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (s == "contents" && type == tpRegular) {
|
||||||
|
parseContents(sink, source, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (s == "executable" && type == tpRegular) {
|
||||||
|
readString(source);
|
||||||
|
sink.isExecutable();
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (s == "entry" && type == tpDirectory) {
|
||||||
|
parseEntry(sink, source, path);
|
||||||
|
}
|
||||||
|
|
||||||
|
else if (s == "target" && type == tpSymlink) {
|
||||||
|
string target = readString(source);
|
||||||
|
sink.createSymlink(path, target);
|
||||||
|
}
|
||||||
|
|
||||||
|
else {
|
||||||
|
throw badArchive("unknown field " + s);
|
||||||
|
skipGeneric(source);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void parseDump(ParseSink & sink, Source & source)
|
||||||
|
{
|
||||||
|
string version;
|
||||||
|
try {
|
||||||
|
version = readString(source);
|
||||||
|
} catch (SerialisationError & e) {
|
||||||
|
/* This generally means the integer at the start couldn't be
|
||||||
|
decoded. Ignore and throw the exception below. */
|
||||||
|
}
|
||||||
|
if (version != archiveVersion1)
|
||||||
|
throw badArchive("input doesn't look like a Nix archive");
|
||||||
|
parse(sink, source, "");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct RestoreSink : ParseSink
|
||||||
|
{
|
||||||
|
Path dstPath;
|
||||||
|
AutoCloseFD fd;
|
||||||
|
|
||||||
|
void createDirectory(const Path & path)
|
||||||
|
{
|
||||||
|
Path p = dstPath + path;
|
||||||
|
if (mkdir(p.c_str(), 0777) == -1)
|
||||||
|
throw SysError(format("creating directory `%1%'") % p);
|
||||||
|
}
|
||||||
|
|
||||||
|
void createRegularFile(const Path & path)
|
||||||
|
{
|
||||||
|
Path p = dstPath + path;
|
||||||
|
fd.close();
|
||||||
|
fd = open(p.c_str(), O_CREAT | O_EXCL | O_WRONLY, 0666);
|
||||||
|
if (fd == -1) throw SysError(format("creating file `%1%'") % p);
|
||||||
|
}
|
||||||
|
|
||||||
|
void isExecutable()
|
||||||
|
{
|
||||||
|
struct stat st;
|
||||||
|
if (fstat(fd, &st) == -1)
|
||||||
|
throw SysError("fstat");
|
||||||
|
if (fchmod(fd, st.st_mode | (S_IXUSR | S_IXGRP | S_IXOTH)) == -1)
|
||||||
|
throw SysError("fchmod");
|
||||||
|
}
|
||||||
|
|
||||||
|
void preallocateContents(unsigned long long len)
|
||||||
|
{
|
||||||
|
#if HAVE_POSIX_FALLOCATE
|
||||||
|
if (len) {
|
||||||
|
errno = posix_fallocate(fd, 0, len);
|
||||||
|
/* Note that EINVAL may indicate that the underlying
|
||||||
|
filesystem doesn't support preallocation (e.g. on
|
||||||
|
OpenSolaris). Since preallocation is just an
|
||||||
|
optimisation, ignore it. */
|
||||||
|
if (errno && errno != EINVAL)
|
||||||
|
throw SysError(format("preallocating file of %1% bytes") % len);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
void receiveContents(unsigned char * data, unsigned int len)
|
||||||
|
{
|
||||||
|
writeFull(fd, data, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
void createSymlink(const Path & path, const string & target)
|
||||||
|
{
|
||||||
|
Path p = dstPath + path;
|
||||||
|
nix::createSymlink(target, p);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void restorePath(const Path & path, Source & source)
|
||||||
|
{
|
||||||
|
RestoreSink sink;
|
||||||
|
sink.dstPath = path;
|
||||||
|
parseDump(sink, source);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,75 @@
#pragma once

#include "types.hh"
#include "serialise.hh"


namespace nix {


/* dumpPath creates a Nix archive of the specified path. The format
   is as follows:

   IF path points to a REGULAR FILE:
     dump(path) = attrs(
       [ ("type", "regular")
       , ("contents", contents(path))
       ])

   IF path points to a DIRECTORY:
     dump(path) = attrs(
       [ ("type", "directory")
       , ("entries", concat(map(f, sort(entries(path)))))
       ])
       where f(fn) = attrs(
         [ ("name", fn)
         , ("file", dump(path + "/" + fn))
         ])

   where:

     attrs(as) = concat(map(attr, as)) + encN(0)
     attr((a, b)) = encS(a) + encS(b)

     encS(s) = encN(len(s)) + s + (padding until next 64-bit boundary)

     encN(n) = 64-bit little-endian encoding of n.

     contents(path) = the contents of a regular file.

     sort(strings) = lexicographic sort by 8-bit value (strcmp).

     entries(path) = the entries of a directory, without `.' and
     `..'.

     `+' denotes string concatenation. */

struct PathFilter
{
    virtual ~PathFilter() { }
    virtual bool operator () (const Path & path) { return true; }
};

extern PathFilter defaultPathFilter;

void dumpPath(const Path & path, Sink & sink,
    PathFilter & filter = defaultPathFilter);

struct ParseSink
{
    virtual void createDirectory(const Path & path) { };

    virtual void createRegularFile(const Path & path) { };
    virtual void isExecutable() { };
    virtual void preallocateContents(unsigned long long size) { };
    virtual void receiveContents(unsigned char * data, unsigned int len) { };

    virtual void createSymlink(const Path & path, const string & target) { };
};

void parseDump(ParseSink & sink, Source & source);

void restorePath(const Path & path, Source & source);


}
|
|
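A minimal usage sketch (not part of this commit, added for illustration): it round-trips a filesystem tree through the NAR format using dumpPath and restorePath declared above, together with StringSink/StringSource from serialise.hh. The helper copyViaNar and its arguments are hypothetical names.

    #include "archive.hh"
    #include "serialise.hh"

    using namespace nix;

    /* Serialise `src' as a NAR held in memory, then recreate it at `dst'.
       `dst' must not already exist, since RestoreSink opens files with
       O_EXCL. */
    void copyViaNar(const Path & src, const Path & dst)
    {
        StringSink sink;            /* accumulates the archive in memory */
        dumpPath(src, sink);        /* uses defaultPathFilter */

        StringSource source(sink.s);
        restorePath(dst, source);
    }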
@ -0,0 +1,382 @@
|
||||||
|
#include "config.h"
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
#include <cstring>
|
||||||
|
|
||||||
|
#ifdef HAVE_OPENSSL
|
||||||
|
#include <openssl/md5.h>
|
||||||
|
#include <openssl/sha.h>
|
||||||
|
#else
|
||||||
|
extern "C" {
|
||||||
|
#include "md5.h"
|
||||||
|
#include "sha1.h"
|
||||||
|
#include "sha256.h"
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#include "hash.hh"
|
||||||
|
#include "archive.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
Hash::Hash()
|
||||||
|
{
|
||||||
|
type = htUnknown;
|
||||||
|
hashSize = 0;
|
||||||
|
memset(hash, 0, maxHashSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash::Hash(HashType type)
|
||||||
|
{
|
||||||
|
this->type = type;
|
||||||
|
if (type == htMD5) hashSize = md5HashSize;
|
||||||
|
else if (type == htSHA1) hashSize = sha1HashSize;
|
||||||
|
else if (type == htSHA256) hashSize = sha256HashSize;
|
||||||
|
else throw Error("unknown hash type");
|
||||||
|
assert(hashSize <= maxHashSize);
|
||||||
|
memset(hash, 0, maxHashSize);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool Hash::operator == (const Hash & h2) const
|
||||||
|
{
|
||||||
|
if (hashSize != h2.hashSize) return false;
|
||||||
|
for (unsigned int i = 0; i < hashSize; i++)
|
||||||
|
if (hash[i] != h2.hash[i]) return false;
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool Hash::operator != (const Hash & h2) const
|
||||||
|
{
|
||||||
|
return !(*this == h2);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool Hash::operator < (const Hash & h) const
|
||||||
|
{
|
||||||
|
for (unsigned int i = 0; i < hashSize; i++) {
|
||||||
|
if (hash[i] < h.hash[i]) return true;
|
||||||
|
if (hash[i] > h.hash[i]) return false;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
const string base16Chars = "0123456789abcdef";
|
||||||
|
|
||||||
|
|
||||||
|
string printHash(const Hash & hash)
|
||||||
|
{
|
||||||
|
char buf[hash.hashSize * 2];
|
||||||
|
for (unsigned int i = 0; i < hash.hashSize; i++) {
|
||||||
|
buf[i * 2] = base16Chars[hash.hash[i] >> 4];
|
||||||
|
buf[i * 2 + 1] = base16Chars[hash.hash[i] & 0x0f];
|
||||||
|
}
|
||||||
|
return string(buf, hash.hashSize * 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash parseHash(HashType ht, const string & s)
|
||||||
|
{
|
||||||
|
Hash hash(ht);
|
||||||
|
if (s.length() != hash.hashSize * 2)
|
||||||
|
throw Error(format("invalid hash `%1%'") % s);
|
||||||
|
for (unsigned int i = 0; i < hash.hashSize; i++) {
|
||||||
|
string s2(s, i * 2, 2);
|
||||||
|
if (!isxdigit(s2[0]) || !isxdigit(s2[1]))
|
||||||
|
throw Error(format("invalid hash `%1%'") % s);
|
||||||
|
std::istringstream str(s2);
|
||||||
|
int n;
|
||||||
|
str >> std::hex >> n;
|
||||||
|
hash.hash[i] = n;
|
||||||
|
}
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static unsigned char divMod(unsigned char * bytes, unsigned char y)
|
||||||
|
{
|
||||||
|
unsigned int borrow = 0;
|
||||||
|
|
||||||
|
int pos = Hash::maxHashSize - 1;
|
||||||
|
while (pos >= 0 && !bytes[pos]) --pos;
|
||||||
|
|
||||||
|
for ( ; pos >= 0; --pos) {
|
||||||
|
unsigned int s = bytes[pos] + (borrow << 8);
|
||||||
|
unsigned int d = s / y;
|
||||||
|
borrow = s % y;
|
||||||
|
bytes[pos] = d;
|
||||||
|
}
|
||||||
|
|
||||||
|
return borrow;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
unsigned int hashLength32(const Hash & hash)
|
||||||
|
{
|
||||||
|
return (hash.hashSize * 8 - 1) / 5 + 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// omitted: E O U T
|
||||||
|
const string base32Chars = "0123456789abcdfghijklmnpqrsvwxyz";
|
||||||
|
|
||||||
|
|
||||||
|
string printHash32(const Hash & hash)
|
||||||
|
{
|
||||||
|
Hash hash2(hash);
|
||||||
|
unsigned int len = hashLength32(hash);
|
||||||
|
|
||||||
|
const char * chars = base32Chars.data();
|
||||||
|
|
||||||
|
string s(len, '0');
|
||||||
|
|
||||||
|
int pos = len - 1;
|
||||||
|
while (pos >= 0) {
|
||||||
|
unsigned char digit = divMod(hash2.hash, 32);
|
||||||
|
s[pos--] = chars[digit];
|
||||||
|
}
|
||||||
|
|
||||||
|
for (unsigned int i = 0; i < hash2.maxHashSize; ++i)
|
||||||
|
assert(hash2.hash[i] == 0);
|
||||||
|
|
||||||
|
return s;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
string printHash16or32(const Hash & hash)
|
||||||
|
{
|
||||||
|
return hash.type == htMD5 ? printHash(hash) : printHash32(hash);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static bool mul(unsigned char * bytes, unsigned char y, int maxSize)
|
||||||
|
{
|
||||||
|
unsigned char carry = 0;
|
||||||
|
|
||||||
|
for (int pos = 0; pos < maxSize; ++pos) {
|
||||||
|
unsigned int m = bytes[pos] * y + carry;
|
||||||
|
bytes[pos] = m & 0xff;
|
||||||
|
carry = m >> 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
return carry;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static bool add(unsigned char * bytes, unsigned char y, int maxSize)
|
||||||
|
{
|
||||||
|
unsigned char carry = y;
|
||||||
|
|
||||||
|
for (int pos = 0; pos < maxSize; ++pos) {
|
||||||
|
unsigned int m = bytes[pos] + carry;
|
||||||
|
bytes[pos] = m & 0xff;
|
||||||
|
carry = m >> 8;
|
||||||
|
if (carry == 0) break;
|
||||||
|
}
|
||||||
|
|
||||||
|
return carry;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash parseHash32(HashType ht, const string & s)
|
||||||
|
{
|
||||||
|
Hash hash(ht);
|
||||||
|
|
||||||
|
const char * chars = base32Chars.data();
|
||||||
|
|
||||||
|
for (unsigned int i = 0; i < s.length(); ++i) {
|
||||||
|
char c = s[i];
|
||||||
|
unsigned char digit;
|
||||||
|
for (digit = 0; digit < base32Chars.size(); ++digit) /* !!! slow */
|
||||||
|
if (chars[digit] == c) break;
|
||||||
|
if (digit >= 32)
|
||||||
|
throw Error(format("invalid base-32 hash `%1%'") % s);
|
||||||
|
if (mul(hash.hash, 32, hash.hashSize) ||
|
||||||
|
add(hash.hash, digit, hash.hashSize))
|
||||||
|
throw Error(format("base-32 hash `%1%' is too large") % s);
|
||||||
|
}
|
||||||
|
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash parseHash16or32(HashType ht, const string & s)
|
||||||
|
{
|
||||||
|
Hash hash(ht);
|
||||||
|
if (s.size() == hash.hashSize * 2)
|
||||||
|
/* hexadecimal representation */
|
||||||
|
hash = parseHash(ht, s);
|
||||||
|
else if (s.size() == hashLength32(hash))
|
||||||
|
/* base-32 representation */
|
||||||
|
hash = parseHash32(ht, s);
|
||||||
|
else
|
||||||
|
throw Error(format("hash `%1%' has wrong length for hash type `%2%'")
|
||||||
|
% s % printHashType(ht));
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool isHash(const string & s)
|
||||||
|
{
|
||||||
|
if (s.length() != 32) return false;
|
||||||
|
for (int i = 0; i < 32; i++) {
|
||||||
|
char c = s[i];
|
||||||
|
if (!((c >= '0' && c <= '9') ||
|
||||||
|
(c >= 'a' && c <= 'f')))
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct Ctx
|
||||||
|
{
|
||||||
|
MD5_CTX md5;
|
||||||
|
SHA_CTX sha1;
|
||||||
|
SHA256_CTX sha256;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
static void start(HashType ht, Ctx & ctx)
|
||||||
|
{
|
||||||
|
if (ht == htMD5) MD5_Init(&ctx.md5);
|
||||||
|
else if (ht == htSHA1) SHA1_Init(&ctx.sha1);
|
||||||
|
else if (ht == htSHA256) SHA256_Init(&ctx.sha256);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void update(HashType ht, Ctx & ctx,
|
||||||
|
const unsigned char * bytes, unsigned int len)
|
||||||
|
{
|
||||||
|
if (ht == htMD5) MD5_Update(&ctx.md5, bytes, len);
|
||||||
|
else if (ht == htSHA1) SHA1_Update(&ctx.sha1, bytes, len);
|
||||||
|
else if (ht == htSHA256) SHA256_Update(&ctx.sha256, bytes, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void finish(HashType ht, Ctx & ctx, unsigned char * hash)
|
||||||
|
{
|
||||||
|
if (ht == htMD5) MD5_Final(hash, &ctx.md5);
|
||||||
|
else if (ht == htSHA1) SHA1_Final(hash, &ctx.sha1);
|
||||||
|
else if (ht == htSHA256) SHA256_Final(hash, &ctx.sha256);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash hashString(HashType ht, const string & s)
|
||||||
|
{
|
||||||
|
Ctx ctx;
|
||||||
|
Hash hash(ht);
|
||||||
|
start(ht, ctx);
|
||||||
|
update(ht, ctx, (const unsigned char *) s.data(), s.length());
|
||||||
|
finish(ht, ctx, hash.hash);
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash hashFile(HashType ht, const Path & path)
|
||||||
|
{
|
||||||
|
Ctx ctx;
|
||||||
|
Hash hash(ht);
|
||||||
|
start(ht, ctx);
|
||||||
|
|
||||||
|
AutoCloseFD fd = open(path.c_str(), O_RDONLY);
|
||||||
|
if (fd == -1) throw SysError(format("opening file `%1%'") % path);
|
||||||
|
|
||||||
|
unsigned char buf[8192];
|
||||||
|
ssize_t n;
|
||||||
|
while ((n = read(fd, buf, sizeof(buf)))) {
|
||||||
|
checkInterrupt();
|
||||||
|
if (n == -1) throw SysError(format("reading file `%1%'") % path);
|
||||||
|
update(ht, ctx, buf, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
finish(ht, ctx, hash.hash);
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
HashSink::HashSink(HashType ht) : ht(ht)
|
||||||
|
{
|
||||||
|
ctx = new Ctx;
|
||||||
|
bytes = 0;
|
||||||
|
start(ht, *ctx);
|
||||||
|
}
|
||||||
|
|
||||||
|
HashSink::~HashSink()
|
||||||
|
{
|
||||||
|
bufPos = 0;
|
||||||
|
delete ctx;
|
||||||
|
}
|
||||||
|
|
||||||
|
void HashSink::write(const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
bytes += len;
|
||||||
|
update(ht, *ctx, data, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
HashResult HashSink::finish()
|
||||||
|
{
|
||||||
|
flush();
|
||||||
|
Hash hash(ht);
|
||||||
|
nix::finish(ht, *ctx, hash.hash);
|
||||||
|
return HashResult(hash, bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
HashResult HashSink::currentHash()
|
||||||
|
{
|
||||||
|
flush();
|
||||||
|
Ctx ctx2 = *ctx;
|
||||||
|
Hash hash(ht);
|
||||||
|
nix::finish(ht, ctx2, hash.hash);
|
||||||
|
return HashResult(hash, bytes);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
HashResult hashPath(
|
||||||
|
HashType ht, const Path & path, PathFilter & filter)
|
||||||
|
{
|
||||||
|
HashSink sink(ht);
|
||||||
|
dumpPath(path, sink, filter);
|
||||||
|
return sink.finish();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
Hash compressHash(const Hash & hash, unsigned int newSize)
|
||||||
|
{
|
||||||
|
Hash h;
|
||||||
|
h.hashSize = newSize;
|
||||||
|
for (unsigned int i = 0; i < hash.hashSize; ++i)
|
||||||
|
h.hash[i % newSize] ^= hash.hash[i];
|
||||||
|
return h;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
HashType parseHashType(const string & s)
|
||||||
|
{
|
||||||
|
if (s == "md5") return htMD5;
|
||||||
|
else if (s == "sha1") return htSHA1;
|
||||||
|
else if (s == "sha256") return htSHA256;
|
||||||
|
else return htUnknown;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
string printHashType(HashType ht)
|
||||||
|
{
|
||||||
|
if (ht == htMD5) return "md5";
|
||||||
|
else if (ht == htSHA1) return "sha1";
|
||||||
|
else if (ht == htSHA256) return "sha256";
|
||||||
|
else throw Error("cannot print unknown hash type");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,113 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "types.hh"
|
||||||
|
#include "serialise.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
typedef enum { htUnknown, htMD5, htSHA1, htSHA256 } HashType;
|
||||||
|
|
||||||
|
|
||||||
|
const int md5HashSize = 16;
|
||||||
|
const int sha1HashSize = 20;
|
||||||
|
const int sha256HashSize = 32;
|
||||||
|
|
||||||
|
extern const string base32Chars;
|
||||||
|
|
||||||
|
|
||||||
|
struct Hash
|
||||||
|
{
|
||||||
|
static const unsigned int maxHashSize = 32;
|
||||||
|
unsigned int hashSize;
|
||||||
|
unsigned char hash[maxHashSize];
|
||||||
|
|
||||||
|
HashType type;
|
||||||
|
|
||||||
|
/* Create an unusable hash object. */
|
||||||
|
Hash();
|
||||||
|
|
||||||
|
/* Create a zero-filled hash object. */
|
||||||
|
Hash(HashType type);
|
||||||
|
|
||||||
|
/* Check whether two hashes are equal. */
|
||||||
|
bool operator == (const Hash & h2) const;
|
||||||
|
|
||||||
|
/* Check whether two hashes are not equal. */
|
||||||
|
bool operator != (const Hash & h2) const;
|
||||||
|
|
||||||
|
/* For sorting. */
|
||||||
|
bool operator < (const Hash & h) const;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Convert a hash to a hexadecimal representation. */
|
||||||
|
string printHash(const Hash & hash);
|
||||||
|
|
||||||
|
/* Parse a hexadecimal representation of a hash code. */
|
||||||
|
Hash parseHash(HashType ht, const string & s);
|
||||||
|
|
||||||
|
/* Returns the length of a base-32 hash representation. */
|
||||||
|
unsigned int hashLength32(const Hash & hash);
|
||||||
|
|
||||||
|
/* Convert a hash to a base-32 representation. */
|
||||||
|
string printHash32(const Hash & hash);
|
||||||
|
|
||||||
|
/* Print a hash in base-16 if it's MD5, or base-32 otherwise. */
|
||||||
|
string printHash16or32(const Hash & hash);
|
||||||
|
|
||||||
|
/* Parse a base-32 representation of a hash code. */
|
||||||
|
Hash parseHash32(HashType ht, const string & s);
|
||||||
|
|
||||||
|
/* Parse a base-16 or base-32 representation of a hash code. */
|
||||||
|
Hash parseHash16or32(HashType ht, const string & s);
|
||||||
|
|
||||||
|
/* Verify that the given string is a valid hash code. */
|
||||||
|
bool isHash(const string & s);
|
||||||
|
|
||||||
|
/* Compute the hash of the given string. */
|
||||||
|
Hash hashString(HashType ht, const string & s);
|
||||||
|
|
||||||
|
/* Compute the hash of the given file. */
|
||||||
|
Hash hashFile(HashType ht, const Path & path);
|
||||||
|
|
||||||
|
/* Compute the hash of the given path. The hash is defined as
|
||||||
|
(essentially) hashString(ht, dumpPath(path)). */
|
||||||
|
struct PathFilter;
|
||||||
|
extern PathFilter defaultPathFilter;
|
||||||
|
typedef std::pair<Hash, unsigned long long> HashResult;
|
||||||
|
HashResult hashPath(HashType ht, const Path & path,
|
||||||
|
PathFilter & filter = defaultPathFilter);
|
||||||
|
|
||||||
|
/* Compress a hash to the specified number of bytes by cyclically
|
||||||
|
XORing bytes together. */
|
||||||
|
Hash compressHash(const Hash & hash, unsigned int newSize);
|
||||||
|
|
||||||
|
/* Parse a string representing a hash type. */
|
||||||
|
HashType parseHashType(const string & s);
|
||||||
|
|
||||||
|
/* And the reverse. */
|
||||||
|
string printHashType(HashType ht);
|
||||||
|
|
||||||
|
|
||||||
|
struct Ctx;
|
||||||
|
|
||||||
|
class HashSink : public BufferedSink
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
HashType ht;
|
||||||
|
Ctx * ctx;
|
||||||
|
unsigned long long bytes;
|
||||||
|
|
||||||
|
public:
|
||||||
|
HashSink(HashType ht);
|
||||||
|
HashSink(const HashSink & h);
|
||||||
|
~HashSink();
|
||||||
|
void write(const unsigned char * data, size_t len);
|
||||||
|
HashResult finish();
|
||||||
|
HashResult currentHash();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
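A minimal usage sketch (not part of this commit): it hashes a string and a path with the interface declared above and prints both the base-16 and base-32 renderings. The path "/tmp/example" is a hypothetical placeholder.

    #include <iostream>
    #include "hash.hh"

    using namespace nix;

    int main()
    {
        Hash h = hashString(htSHA256, "hello");
        std::cout << printHash(h) << std::endl;     /* base-16 */
        std::cout << printHash32(h) << std::endl;   /* base-32 */

        /* Hash of the path's NAR serialisation, plus the NAR size. */
        HashResult res = hashPath(htSHA256, "/tmp/example");
        std::cout << printHash(res.first) << " " << res.second << std::endl;
        return 0;
    }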
@ -0,0 +1,259 @@
|
||||||
|
#include "serialise.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <cerrno>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
BufferedSink::~BufferedSink()
|
||||||
|
{
|
||||||
|
/* We can't call flush() here, because C++ for some insane reason
|
||||||
|
doesn't allow you to call virtual methods from a destructor. */
|
||||||
|
assert(!bufPos);
|
||||||
|
delete[] buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void BufferedSink::operator () (const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
if (!buffer) buffer = new unsigned char[bufSize];
|
||||||
|
|
||||||
|
while (len) {
|
||||||
|
/* Optimisation: bypass the buffer if the data exceeds the
|
||||||
|
buffer size. */
|
||||||
|
if (bufPos + len >= bufSize) {
|
||||||
|
flush();
|
||||||
|
write(data, len);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
/* Otherwise, copy the bytes to the buffer. Flush the buffer
|
||||||
|
when it's full. */
|
||||||
|
size_t n = bufPos + len > bufSize ? bufSize - bufPos : len;
|
||||||
|
memcpy(buffer + bufPos, data, n);
|
||||||
|
data += n; bufPos += n; len -= n;
|
||||||
|
if (bufPos == bufSize) flush();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void BufferedSink::flush()
|
||||||
|
{
|
||||||
|
if (bufPos == 0) return;
|
||||||
|
size_t n = bufPos;
|
||||||
|
bufPos = 0; // don't trigger the assert() in ~BufferedSink()
|
||||||
|
write(buffer, n);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
FdSink::~FdSink()
|
||||||
|
{
|
||||||
|
try { flush(); } catch (...) { ignoreException(); }
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void FdSink::write(const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
writeFull(fd, data, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void Source::operator () (unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
while (len) {
|
||||||
|
size_t n = read(data, len);
|
||||||
|
data += n; len -= n;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
BufferedSource::~BufferedSource()
|
||||||
|
{
|
||||||
|
delete[] buffer;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t BufferedSource::read(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
if (!buffer) buffer = new unsigned char[bufSize];
|
||||||
|
|
||||||
|
if (!bufPosIn) bufPosIn = readUnbuffered(buffer, bufSize);
|
||||||
|
|
||||||
|
/* Copy out the data in the buffer. */
|
||||||
|
size_t n = len > bufPosIn - bufPosOut ? bufPosIn - bufPosOut : len;
|
||||||
|
memcpy(data, buffer + bufPosOut, n);
|
||||||
|
bufPosOut += n;
|
||||||
|
if (bufPosIn == bufPosOut) bufPosIn = bufPosOut = 0;
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
bool BufferedSource::hasData()
|
||||||
|
{
|
||||||
|
return bufPosOut < bufPosIn;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t FdSource::readUnbuffered(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
ssize_t n;
|
||||||
|
do {
|
||||||
|
checkInterrupt();
|
||||||
|
n = ::read(fd, (char *) data, bufSize);
|
||||||
|
} while (n == -1 && errno == EINTR);
|
||||||
|
if (n == -1) throw SysError("reading from file");
|
||||||
|
if (n == 0) throw EndOfFile("unexpected end-of-file");
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t StringSource::read(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
if (pos == s.size()) throw EndOfFile("end of string reached");
|
||||||
|
size_t n = s.copy((char *) data, len, pos);
|
||||||
|
pos += n;
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void writePadding(size_t len, Sink & sink)
|
||||||
|
{
|
||||||
|
if (len % 8) {
|
||||||
|
unsigned char zero[8];
|
||||||
|
memset(zero, 0, sizeof(zero));
|
||||||
|
sink(zero, 8 - (len % 8));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void writeInt(unsigned int n, Sink & sink)
|
||||||
|
{
|
||||||
|
unsigned char buf[8];
|
||||||
|
memset(buf, 0, sizeof(buf));
|
||||||
|
buf[0] = n & 0xff;
|
||||||
|
buf[1] = (n >> 8) & 0xff;
|
||||||
|
buf[2] = (n >> 16) & 0xff;
|
||||||
|
buf[3] = (n >> 24) & 0xff;
|
||||||
|
sink(buf, sizeof(buf));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void writeLongLong(unsigned long long n, Sink & sink)
|
||||||
|
{
|
||||||
|
unsigned char buf[8];
|
||||||
|
buf[0] = n & 0xff;
|
||||||
|
buf[1] = (n >> 8) & 0xff;
|
||||||
|
buf[2] = (n >> 16) & 0xff;
|
||||||
|
buf[3] = (n >> 24) & 0xff;
|
||||||
|
buf[4] = (n >> 32) & 0xff;
|
||||||
|
buf[5] = (n >> 40) & 0xff;
|
||||||
|
buf[6] = (n >> 48) & 0xff;
|
||||||
|
buf[7] = (n >> 56) & 0xff;
|
||||||
|
sink(buf, sizeof(buf));
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void writeString(const unsigned char * buf, size_t len, Sink & sink)
|
||||||
|
{
|
||||||
|
writeInt(len, sink);
|
||||||
|
sink(buf, len);
|
||||||
|
writePadding(len, sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void writeString(const string & s, Sink & sink)
|
||||||
|
{
|
||||||
|
writeString((const unsigned char *) s.data(), s.size(), sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template<class T> void writeStrings(const T & ss, Sink & sink)
|
||||||
|
{
|
||||||
|
writeInt(ss.size(), sink);
|
||||||
|
foreach (typename T::const_iterator, i, ss)
|
||||||
|
writeString(*i, sink);
|
||||||
|
}
|
||||||
|
|
||||||
|
template void writeStrings(const Paths & ss, Sink & sink);
|
||||||
|
template void writeStrings(const PathSet & ss, Sink & sink);
|
||||||
|
|
||||||
|
|
||||||
|
void readPadding(size_t len, Source & source)
|
||||||
|
{
|
||||||
|
if (len % 8) {
|
||||||
|
unsigned char zero[8];
|
||||||
|
size_t n = 8 - (len % 8);
|
||||||
|
source(zero, n);
|
||||||
|
for (unsigned int i = 0; i < n; i++)
|
||||||
|
if (zero[i]) throw SerialisationError("non-zero padding");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
unsigned int readInt(Source & source)
|
||||||
|
{
|
||||||
|
unsigned char buf[8];
|
||||||
|
source(buf, sizeof(buf));
|
||||||
|
if (buf[4] || buf[5] || buf[6] || buf[7])
|
||||||
|
throw SerialisationError("implementation cannot deal with > 32-bit integers");
|
||||||
|
return
|
||||||
|
buf[0] |
|
||||||
|
(buf[1] << 8) |
|
||||||
|
(buf[2] << 16) |
|
||||||
|
(buf[3] << 24);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
unsigned long long readLongLong(Source & source)
|
||||||
|
{
|
||||||
|
unsigned char buf[8];
|
||||||
|
source(buf, sizeof(buf));
|
||||||
|
return
|
||||||
|
((unsigned long long) buf[0]) |
|
||||||
|
((unsigned long long) buf[1] << 8) |
|
||||||
|
((unsigned long long) buf[2] << 16) |
|
||||||
|
((unsigned long long) buf[3] << 24) |
|
||||||
|
((unsigned long long) buf[4] << 32) |
|
||||||
|
((unsigned long long) buf[5] << 40) |
|
||||||
|
((unsigned long long) buf[6] << 48) |
|
||||||
|
((unsigned long long) buf[7] << 56);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
size_t readString(unsigned char * buf, size_t max, Source & source)
|
||||||
|
{
|
||||||
|
size_t len = readInt(source);
|
||||||
|
if (len > max) throw Error("string is too long");
|
||||||
|
source(buf, len);
|
||||||
|
readPadding(len, source);
|
||||||
|
return len;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
string readString(Source & source)
|
||||||
|
{
|
||||||
|
size_t len = readInt(source);
|
||||||
|
unsigned char * buf = new unsigned char[len];
|
||||||
|
AutoDeleteArray<unsigned char> d(buf);
|
||||||
|
source(buf, len);
|
||||||
|
readPadding(len, source);
|
||||||
|
return string((char *) buf, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template<class T> T readStrings(Source & source)
|
||||||
|
{
|
||||||
|
unsigned int count = readInt(source);
|
||||||
|
T ss;
|
||||||
|
while (count--)
|
||||||
|
ss.insert(ss.end(), readString(source));
|
||||||
|
return ss;
|
||||||
|
}
|
||||||
|
|
||||||
|
template Paths readStrings(Source & source);
|
||||||
|
template PathSet readStrings(Source & source);
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,133 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "types.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
/* Abstract destination of binary data. */
|
||||||
|
struct Sink
|
||||||
|
{
|
||||||
|
virtual ~Sink() { }
|
||||||
|
virtual void operator () (const unsigned char * data, size_t len) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A buffered abstract sink. */
|
||||||
|
struct BufferedSink : Sink
|
||||||
|
{
|
||||||
|
size_t bufSize, bufPos;
|
||||||
|
unsigned char * buffer;
|
||||||
|
|
||||||
|
BufferedSink(size_t bufSize = 32 * 1024)
|
||||||
|
: bufSize(bufSize), bufPos(0), buffer(0) { }
|
||||||
|
~BufferedSink();
|
||||||
|
|
||||||
|
void operator () (const unsigned char * data, size_t len);
|
||||||
|
|
||||||
|
void flush();
|
||||||
|
|
||||||
|
virtual void write(const unsigned char * data, size_t len) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Abstract source of binary data. */
|
||||||
|
struct Source
|
||||||
|
{
|
||||||
|
virtual ~Source() { }
|
||||||
|
|
||||||
|
/* Store exactly ‘len’ bytes in the buffer pointed to by ‘data’.
|
||||||
|
It blocks until all the requested data is available, or throws
|
||||||
|
an error if it is not going to be available. */
|
||||||
|
void operator () (unsigned char * data, size_t len);
|
||||||
|
|
||||||
|
/* Store up to ‘len’ bytes in the buffer pointed to by ‘data’, and
   return the number of bytes stored. It blocks until at least
   one byte is available. */
|
||||||
|
virtual size_t read(unsigned char * data, size_t len) = 0;
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A buffered abstract source. */
|
||||||
|
struct BufferedSource : Source
|
||||||
|
{
|
||||||
|
size_t bufSize, bufPosIn, bufPosOut;
|
||||||
|
unsigned char * buffer;
|
||||||
|
|
||||||
|
BufferedSource(size_t bufSize = 32 * 1024)
|
||||||
|
: bufSize(bufSize), bufPosIn(0), bufPosOut(0), buffer(0) { }
|
||||||
|
~BufferedSource();
|
||||||
|
|
||||||
|
size_t read(unsigned char * data, size_t len);
|
||||||
|
|
||||||
|
/* Underlying read call, to be overridden. */
|
||||||
|
virtual size_t readUnbuffered(unsigned char * data, size_t len) = 0;
|
||||||
|
|
||||||
|
bool hasData();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A sink that writes data to a file descriptor. */
|
||||||
|
struct FdSink : BufferedSink
|
||||||
|
{
|
||||||
|
int fd;
|
||||||
|
|
||||||
|
FdSink() : fd(-1) { }
|
||||||
|
FdSink(int fd) : fd(fd) { }
|
||||||
|
~FdSink();
|
||||||
|
|
||||||
|
void write(const unsigned char * data, size_t len);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A source that reads data from a file descriptor. */
|
||||||
|
struct FdSource : BufferedSource
|
||||||
|
{
|
||||||
|
int fd;
|
||||||
|
FdSource() : fd(-1) { }
|
||||||
|
FdSource(int fd) : fd(fd) { }
|
||||||
|
size_t readUnbuffered(unsigned char * data, size_t len);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A sink that writes data to a string. */
|
||||||
|
struct StringSink : Sink
|
||||||
|
{
|
||||||
|
string s;
|
||||||
|
void operator () (const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
s.append((const char *) data, len);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* A source that reads data from a string. */
|
||||||
|
struct StringSource : Source
|
||||||
|
{
|
||||||
|
const string & s;
|
||||||
|
size_t pos;
|
||||||
|
StringSource(const string & _s) : s(_s), pos(0) { }
|
||||||
|
size_t read(unsigned char * data, size_t len);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
void writePadding(size_t len, Sink & sink);
|
||||||
|
void writeInt(unsigned int n, Sink & sink);
|
||||||
|
void writeLongLong(unsigned long long n, Sink & sink);
|
||||||
|
void writeString(const unsigned char * buf, size_t len, Sink & sink);
|
||||||
|
void writeString(const string & s, Sink & sink);
|
||||||
|
template<class T> void writeStrings(const T & ss, Sink & sink);
|
||||||
|
|
||||||
|
void readPadding(size_t len, Source & source);
|
||||||
|
unsigned int readInt(Source & source);
|
||||||
|
unsigned long long readLongLong(Source & source);
|
||||||
|
size_t readString(unsigned char * buf, size_t max, Source & source);
|
||||||
|
string readString(Source & source);
|
||||||
|
template<class T> T readStrings(Source & source);
|
||||||
|
|
||||||
|
|
||||||
|
MakeError(SerialisationError, Error)
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
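A minimal usage sketch (not part of this commit): it round-trips a couple of values through the length-prefixed, 8-byte-padded wire format implemented in serialise.cc, using the in-memory sinks and sources declared above.

    #include <cassert>
    #include "serialise.hh"

    using namespace nix;

    int main()
    {
        StringSink sink;
        writeInt(42, sink);                    /* 64-bit little-endian */
        writeString("nix-archive-1", sink);    /* length + data + padding */

        StringSource source(sink.s);
        assert(readInt(source) == 42);
        assert(readString(source) == "nix-archive-1");
        return 0;
    }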
@ -0,0 +1,86 @@
#pragma once

#include "config.h"

#include <string>
#include <list>
#include <set>

#include <boost/format.hpp>


namespace nix {


/* Inherit some names from other namespaces for convenience. */
using std::string;
using std::list;
using std::set;
using std::vector;
using boost::format;


struct FormatOrString
{
    string s;
    FormatOrString(const string & s) : s(s) { };
    FormatOrString(const format & f) : s(f.str()) { };
    FormatOrString(const char * s) : s(s) { };
};


/* BaseError should generally not be caught, as it has Interrupted as
   a subclass. Catch Error instead. */
class BaseError : public std::exception
{
protected:
    string prefix_; // used for location traces etc.
    string err;
public:
    unsigned int status; // exit status
    BaseError(const FormatOrString & fs, unsigned int status = 1);
    ~BaseError() throw () { };
    const char * what() const throw () { return err.c_str(); }
    const string & msg() const throw () { return err; }
    const string & prefix() const throw () { return prefix_; }
    BaseError & addPrefix(const FormatOrString & fs);
};

#define MakeError(newClass, superClass) \
    class newClass : public superClass \
    { \
    public: \
        newClass(const FormatOrString & fs, unsigned int status = 1) : superClass(fs, status) { }; \
    };

MakeError(Error, BaseError)

class SysError : public Error
{
public:
    int errNo;
    SysError(const FormatOrString & fs);
};


typedef list<string> Strings;
typedef set<string> StringSet;


/* Paths are just strings. */
typedef string Path;
typedef list<Path> Paths;
typedef set<Path> PathSet;


typedef enum {
    lvlError = 0,
    lvlInfo,
    lvlTalkative,
    lvlChatty,
    lvlDebug,
    lvlVomit
} Verbosity;


}
|
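A minimal sketch (not part of this commit) showing how the MakeError macro and boost::format declared above are typically combined. ConfigError and checkLevel are hypothetical names used only for illustration.

    #include "types.hh"

    namespace nix { MakeError(ConfigError, Error) }

    void checkLevel(int level)
    {
        using namespace nix;
        /* Throw a dedicated error class carrying a formatted message. */
        if (level < lvlError || level > lvlVomit)
            throw ConfigError(format("invalid verbosity level `%1%'") % level);
    }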
File diff suppressed because it is too large
|
@ -0,0 +1,349 @@
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "types.hh"
|
||||||
|
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <dirent.h>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <signal.h>
|
||||||
|
|
||||||
|
#include <cstdio>
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
#define foreach(it_type, it, collection) \
|
||||||
|
for (it_type it = (collection).begin(); it != (collection).end(); ++it)
|
||||||
|
|
||||||
|
#define foreach_reverse(it_type, it, collection) \
|
||||||
|
for (it_type it = (collection).rbegin(); it != (collection).rend(); ++it)
|
||||||
|
|
||||||
|
|
||||||
|
/* Return an environment variable. */
|
||||||
|
string getEnv(const string & key, const string & def = "");
|
||||||
|
|
||||||
|
/* Return an absolutized path, resolving paths relative to the
|
||||||
|
specified directory, or the current directory otherwise. The path
|
||||||
|
is also canonicalised. */
|
||||||
|
Path absPath(Path path, Path dir = "");
|
||||||
|
|
||||||
|
/* Canonicalise a path by removing all `.' or `..' components and
|
||||||
|
double or trailing slashes. Optionally resolves all symlink
|
||||||
|
components such that each component of the resulting path is *not*
|
||||||
|
a symbolic link. */
|
||||||
|
Path canonPath(const Path & path, bool resolveSymlinks = false);
|
||||||
|
|
||||||
|
/* Return the directory part of the given canonical path, i.e.,
|
||||||
|
everything before the final `/'. If the path is the root or an
|
||||||
|
immediate child thereof (e.g., `/foo'), this means an empty string
|
||||||
|
is returned. */
|
||||||
|
Path dirOf(const Path & path);
|
||||||
|
|
||||||
|
/* Return the base name of the given canonical path, i.e., everything
|
||||||
|
following the final `/'. */
|
||||||
|
string baseNameOf(const Path & path);
|
||||||
|
|
||||||
|
/* Check whether a given path is a descendant of the given
|
||||||
|
directory. */
|
||||||
|
bool isInDir(const Path & path, const Path & dir);
|
||||||
|
|
||||||
|
/* Get status of `path'. */
|
||||||
|
struct stat lstat(const Path & path);
|
||||||
|
|
||||||
|
/* Return true iff the given path exists. */
|
||||||
|
bool pathExists(const Path & path);
|
||||||
|
|
||||||
|
/* Read the contents (target) of a symbolic link. The result is not
|
||||||
|
in any way canonicalised. */
|
||||||
|
Path readLink(const Path & path);
|
||||||
|
|
||||||
|
bool isLink(const Path & path);
|
||||||
|
|
||||||
|
/* Read the contents of a directory. The entries `.' and `..' are
|
||||||
|
removed. */
|
||||||
|
Strings readDirectory(const Path & path);
|
||||||
|
|
||||||
|
/* Read the contents of a file into a string. */
|
||||||
|
string readFile(int fd);
|
||||||
|
string readFile(const Path & path, bool drain = false);
|
||||||
|
|
||||||
|
/* Write a string to a file. */
|
||||||
|
void writeFile(const Path & path, const string & s);
|
||||||
|
|
||||||
|
/* Read a line from a file descriptor. */
|
||||||
|
string readLine(int fd);
|
||||||
|
|
||||||
|
/* Write a line to a file descriptor. */
|
||||||
|
void writeLine(int fd, string s);
|
||||||
|
|
||||||
|
/* Delete a path; i.e., in the case of a directory, it is deleted
|
||||||
|
recursively. Don't use this at home, kids. The second variant
|
||||||
|
returns the number of bytes and blocks freed. */
|
||||||
|
void deletePath(const Path & path);
|
||||||
|
|
||||||
|
void deletePath(const Path & path, unsigned long long & bytesFreed);
|
||||||
|
|
||||||
|
/* Create a temporary directory. */
|
||||||
|
Path createTempDir(const Path & tmpRoot = "", const Path & prefix = "nix",
|
||||||
|
bool includePid = true, bool useGlobalCounter = true, mode_t mode = 0755);
|
||||||
|
|
||||||
|
/* Create a directory and all its parents, if necessary. Returns the
|
||||||
|
list of created directories, in order of creation. */
|
||||||
|
Paths createDirs(const Path & path);
|
||||||
|
|
||||||
|
/* Create a symlink. */
|
||||||
|
void createSymlink(const Path & target, const Path & link);
|
||||||
|
|
||||||
|
|
||||||
|
template<class T, class A>
|
||||||
|
T singleton(const A & a)
|
||||||
|
{
|
||||||
|
T t;
|
||||||
|
t.insert(a);
|
||||||
|
return t;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Messages. */
|
||||||
|
|
||||||
|
|
||||||
|
typedef enum {
|
||||||
|
ltPretty, /* nice, nested output */
|
||||||
|
ltEscapes, /* nesting indicated using escape codes (for log2xml) */
|
||||||
|
ltFlat /* no nesting */
|
||||||
|
} LogType;
|
||||||
|
|
||||||
|
extern LogType logType;
|
||||||
|
extern Verbosity verbosity; /* suppress msgs > this */
|
||||||
|
|
||||||
|
class Nest
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
bool nest;
|
||||||
|
public:
|
||||||
|
Nest();
|
||||||
|
~Nest();
|
||||||
|
void open(Verbosity level, const FormatOrString & fs);
|
||||||
|
void close();
|
||||||
|
};
|
||||||
|
|
||||||
|
void printMsg_(Verbosity level, const FormatOrString & fs);
|
||||||
|
|
||||||
|
#define startNest(varName, level, f) \
|
||||||
|
Nest varName; \
|
||||||
|
if (level <= verbosity) { \
|
||||||
|
varName.open(level, (f)); \
|
||||||
|
}
|
||||||
|
|
||||||
|
#define printMsg(level, f) \
|
||||||
|
do { \
|
||||||
|
if (level <= verbosity) { \
|
||||||
|
printMsg_(level, (f)); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
#define debug(f) printMsg(lvlDebug, f)
|
||||||
|
|
||||||
|
void warnOnce(bool & haveWarned, const FormatOrString & fs);
|
||||||
|
|
||||||
|
void writeToStderr(const string & s);
|
||||||
|
|
||||||
|
extern void (*_writeToStderr) (const unsigned char * buf, size_t count);
|
||||||
|
|
||||||
|
|
||||||
|
/* Wrappers around read()/write() that read/write exactly the
|
||||||
|
requested number of bytes. */
|
||||||
|
void readFull(int fd, unsigned char * buf, size_t count);
|
||||||
|
void writeFull(int fd, const unsigned char * buf, size_t count);
|
||||||
|
|
||||||
|
MakeError(EndOfFile, Error)
|
||||||
|
|
||||||
|
|
||||||
|
/* Read a file descriptor until EOF occurs. */
|
||||||
|
string drainFD(int fd);
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/* Automatic cleanup of resources. */
|
||||||
|
|
||||||
|
|
||||||
|
template <class T>
|
||||||
|
struct AutoDeleteArray
|
||||||
|
{
|
||||||
|
T * p;
|
||||||
|
AutoDeleteArray(T * p) : p(p) { }
|
||||||
|
~AutoDeleteArray()
|
||||||
|
{
|
||||||
|
delete [] p;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class AutoDelete
|
||||||
|
{
|
||||||
|
Path path;
|
||||||
|
bool del;
|
||||||
|
bool recursive;
|
||||||
|
public:
|
||||||
|
AutoDelete(const Path & p, bool recursive = true);
|
||||||
|
~AutoDelete();
|
||||||
|
void cancel();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class AutoCloseFD
|
||||||
|
{
|
||||||
|
int fd;
|
||||||
|
public:
|
||||||
|
AutoCloseFD();
|
||||||
|
AutoCloseFD(int fd);
|
||||||
|
AutoCloseFD(const AutoCloseFD & fd);
|
||||||
|
~AutoCloseFD();
|
||||||
|
void operator =(int fd);
|
||||||
|
operator int() const;
|
||||||
|
void close();
|
||||||
|
bool isOpen();
|
||||||
|
int borrow();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class Pipe
|
||||||
|
{
|
||||||
|
public:
|
||||||
|
AutoCloseFD readSide, writeSide;
|
||||||
|
void create();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class AutoCloseDir
|
||||||
|
{
|
||||||
|
DIR * dir;
|
||||||
|
public:
|
||||||
|
AutoCloseDir();
|
||||||
|
AutoCloseDir(DIR * dir);
|
||||||
|
~AutoCloseDir();
|
||||||
|
void operator =(DIR * dir);
|
||||||
|
operator DIR *();
|
||||||
|
void close();
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
class Pid
|
||||||
|
{
|
||||||
|
pid_t pid;
|
||||||
|
bool separatePG;
|
||||||
|
int killSignal;
|
||||||
|
public:
|
||||||
|
Pid();
|
||||||
|
~Pid();
|
||||||
|
void operator =(pid_t pid);
|
||||||
|
operator pid_t();
|
||||||
|
void kill();
|
||||||
|
int wait(bool block);
|
||||||
|
void setSeparatePG(bool separatePG);
|
||||||
|
void setKillSignal(int signal);
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Kill all processes running under the specified uid by sending them
|
||||||
|
a SIGKILL. */
|
||||||
|
void killUser(uid_t uid);
|
||||||
|
|
||||||
|
|
||||||
|
/* Run a program and return its stdout in a string (i.e., like the
|
||||||
|
shell backtick operator). */
|
||||||
|
string runProgram(Path program, bool searchPath = false,
|
||||||
|
const Strings & args = Strings());
|
||||||
|
|
||||||
|
/* Close all file descriptors except stdin, stdout, stderr, and those
|
||||||
|
listed in the given set. Good practice in child processes. */
|
||||||
|
void closeMostFDs(const set<int> & exceptions);
|
||||||
|
|
||||||
|
/* Set the close-on-exec flag for the given file descriptor. */
|
||||||
|
void closeOnExec(int fd);
|
||||||
|
|
||||||
|
/* Call vfork() if available, otherwise fork(). */
|
||||||
|
extern pid_t (*maybeVfork)();
|
||||||
|
|
||||||
|
|
||||||
|
/* User interruption. */
|
||||||
|
|
||||||
|
extern volatile sig_atomic_t _isInterrupted;
|
||||||
|
|
||||||
|
void _interrupted();
|
||||||
|
|
||||||
|
void inline checkInterrupt()
|
||||||
|
{
|
||||||
|
if (_isInterrupted) _interrupted();
|
||||||
|
}
|
||||||
|
|
||||||
|
MakeError(Interrupted, BaseError)
|
||||||
|
|
||||||
|
|
||||||
|
/* String tokenizer. */
|
||||||
|
template<class C> C tokenizeString(const string & s, const string & separators = " \t\n\r");
|
||||||
|
|
||||||
|
|
||||||
|
/* Concatenate the given strings with a separator between the
|
||||||
|
elements. */
|
||||||
|
string concatStringsSep(const string & sep, const Strings & ss);
|
||||||
|
string concatStringsSep(const string & sep, const StringSet & ss);
|
||||||
|
|
||||||
|
|
||||||
|
/* Remove trailing whitespace from a string. */
|
||||||
|
string chomp(const string & s);
|
||||||
|
|
||||||
|
|
||||||
|
/* Convert the exit status of a child as returned by wait() into an
|
||||||
|
error string. */
|
||||||
|
string statusToString(int status);
|
||||||
|
|
||||||
|
bool statusOk(int status);
|
||||||
|
|
||||||
|
|
||||||
|
/* Parse a string into an integer. */
|
||||||
|
template<class N> bool string2Int(const string & s, N & n)
|
||||||
|
{
|
||||||
|
std::istringstream str(s);
|
||||||
|
str >> n;
|
||||||
|
return str && str.get() == EOF;
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class N> string int2String(N n)
|
||||||
|
{
|
||||||
|
std::ostringstream str;
|
||||||
|
str << n;
|
||||||
|
return str.str();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Return true iff `s' ends in `suffix'. */
|
||||||
|
bool hasSuffix(const string & s, const string & suffix);
|
||||||
|
|
||||||
|
|
||||||
|
/* Read string `s' from stream `str'. */
|
||||||
|
void expect(std::istream & str, const string & s);
|
||||||
|
|
||||||
|
|
||||||
|
/* Read a C-style string from stream `str'. */
|
||||||
|
string parseString(std::istream & str);
|
||||||
|
|
||||||
|
|
||||||
|
/* Utility function used to parse legacy ATerms. */
|
||||||
|
bool endOfList(std::istream & str);
|
||||||
|
|
||||||
|
|
||||||
|
/* Escape a string that contains octal-encoded escape codes such as
|
||||||
|
used in /etc/fstab and /proc/mounts (e.g. "foo\040bar" decodes to
|
||||||
|
"foo bar"). */
|
||||||
|
string decodeOctalEscaped(const string & s);
|
||||||
|
|
||||||
|
|
||||||
|
/* Exception handling in destructors: print an error message, then
|
||||||
|
ignore the exception. */
|
||||||
|
void ignoreException();
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
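A minimal usage sketch (not part of this commit) for the parsing helpers declared above; it assumes the library's explicit instantiation of tokenizeString for Strings is linked in.

    #include <cassert>
    #include "util.hh"

    using namespace nix;

    int main()
    {
        int n;
        assert(string2Int("42", n) && n == 42);
        assert(int2String(n) == "42");

        /* Default separators are " \t\n\r"; empty tokens are skipped. */
        Strings words = tokenizeString<Strings>("a b  c");
        assert(words.size() == 3);
        return 0;
    }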
@ -0,0 +1,94 @@
|
||||||
|
#include <assert.h>
|
||||||
|
|
||||||
|
#include "xml-writer.hh"
|
||||||
|
|
||||||
|
|
||||||
|
namespace nix {
|
||||||
|
|
||||||
|
|
||||||
|
XMLWriter::XMLWriter(bool indent, std::ostream & output)
|
||||||
|
: output(output), indent(indent)
|
||||||
|
{
|
||||||
|
output << "<?xml version='1.0' encoding='utf-8'?>" << std::endl;
|
||||||
|
closed = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
XMLWriter::~XMLWriter()
|
||||||
|
{
|
||||||
|
close();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::close()
|
||||||
|
{
|
||||||
|
if (closed) return;
|
||||||
|
while (!pendingElems.empty()) closeElement();
|
||||||
|
closed = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::indent_(unsigned int depth)
|
||||||
|
{
|
||||||
|
if (!indent) return;
|
||||||
|
output << string(depth * 2, ' ');
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::openElement(const string & name,
|
||||||
|
const XMLAttrs & attrs)
|
||||||
|
{
|
||||||
|
assert(!closed);
|
||||||
|
indent_(pendingElems.size());
|
||||||
|
output << "<" << name;
|
||||||
|
writeAttrs(attrs);
|
||||||
|
output << ">";
|
||||||
|
if (indent) output << std::endl;
|
||||||
|
pendingElems.push_back(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::closeElement()
|
||||||
|
{
|
||||||
|
assert(!pendingElems.empty());
|
||||||
|
indent_(pendingElems.size() - 1);
|
||||||
|
output << "</" << pendingElems.back() << ">";
|
||||||
|
if (indent) output << std::endl;
|
||||||
|
pendingElems.pop_back();
|
||||||
|
if (pendingElems.empty()) closed = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::writeEmptyElement(const string & name,
|
||||||
|
const XMLAttrs & attrs)
|
||||||
|
{
|
||||||
|
assert(!closed);
|
||||||
|
indent_(pendingElems.size());
|
||||||
|
output << "<" << name;
|
||||||
|
writeAttrs(attrs);
|
||||||
|
output << " />";
|
||||||
|
if (indent) output << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void XMLWriter::writeAttrs(const XMLAttrs & attrs)
|
||||||
|
{
|
||||||
|
for (XMLAttrs::const_iterator i = attrs.begin(); i != attrs.end(); ++i) {
|
||||||
|
output << " " << i->first << "=\"";
|
||||||
|
for (unsigned int j = 0; j < i->second.size(); ++j) {
|
||||||
|
char c = i->second[j];
|
||||||
|
if (c == '"') output << """;
|
||||||
|
else if (c == '<') output << "<";
|
||||||
|
else if (c == '>') output << ">";
|
||||||
|
else if (c == '&') output << "&";
|
||||||
|
/* Escape newlines to prevent attribute normalisation (see
|
||||||
|
XML spec, section 3.3.3. */
|
||||||
|
else if (c == '\n') output << "
";
|
||||||
|
else output << c;
|
||||||
|
}
|
||||||
|
output << "\"";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
}
|
|
@ -0,0 +1,69 @@
#pragma once

#include <iostream>
#include <string>
#include <list>
#include <map>


namespace nix {

using std::string;
using std::map;
using std::list;


typedef map<string, string> XMLAttrs;


class XMLWriter
{
private:

    std::ostream & output;

    bool indent;
    bool closed;

    list<string> pendingElems;

public:

    XMLWriter(bool indent, std::ostream & output);
    ~XMLWriter();

    void close();

    void openElement(const string & name,
        const XMLAttrs & attrs = XMLAttrs());
    void closeElement();

    void writeEmptyElement(const string & name,
        const XMLAttrs & attrs = XMLAttrs());

private:
    void writeAttrs(const XMLAttrs & attrs);

    void indent_(unsigned int depth);
};


class XMLOpenElement
{
private:
    XMLWriter & writer;
public:
    XMLOpenElement(XMLWriter & writer, const string & name,
        const XMLAttrs & attrs = XMLAttrs())
        : writer(writer)
    {
        writer.openElement(name, attrs);
    }
    ~XMLOpenElement()
    {
        writer.closeElement();
    }
};


}
|
|
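A minimal usage sketch (not part of this commit): it emits a small indented XML document with the writer declared above; the element and attribute values are made up.

    #include <iostream>
    #include "xml-writer.hh"

    using namespace nix;

    int main()
    {
        XMLWriter xml(true, std::cout);          /* indented output */
        XMLOpenElement root(xml, "packages");    /* closed when it goes out of scope */
        XMLAttrs attrs;
        attrs["name"] = "hello";
        attrs["version"] = "2.8";
        xml.writeEmptyElement("package", attrs);
        return 0;
    }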
@ -0,0 +1,939 @@
|
||||||
|
#include "shared.hh"
|
||||||
|
#include "local-store.hh"
|
||||||
|
#include "util.hh"
|
||||||
|
#include "serialise.hh"
|
||||||
|
#include "worker-protocol.hh"
|
||||||
|
#include "archive.hh"
|
||||||
|
#include "affinity.hh"
|
||||||
|
#include "globals.hh"
|
||||||
|
|
||||||
|
#include <cstring>
|
||||||
|
#include <unistd.h>
|
||||||
|
#include <signal.h>
|
||||||
|
#include <sys/types.h>
|
||||||
|
#include <sys/wait.h>
|
||||||
|
#include <sys/stat.h>
|
||||||
|
#include <sys/socket.h>
|
||||||
|
#include <sys/un.h>
|
||||||
|
#include <fcntl.h>
|
||||||
|
#include <errno.h>
|
||||||
|
|
||||||
|
using namespace nix;
|
||||||
|
|
||||||
|
|
||||||
|
/* On platforms that have O_ASYNC, we can detect when a client
|
||||||
|
disconnects and immediately kill any ongoing builds. On platforms
|
||||||
|
that lack it, we only notice the disconnection the next time we try
|
||||||
|
to write to the client. So if you have a builder that never
|
||||||
|
generates output on stdout/stderr, the daemon will never notice
|
||||||
|
that the client has disconnected until the builder terminates.
|
||||||
|
|
||||||
|
GNU/Hurd does have O_ASYNC, but its Unix-domain socket translator
|
||||||
|
(pflocal) does not implement F_SETOWN. See
|
||||||
|
<http://lists.gnu.org/archive/html/bug-guix/2013-07/msg00021.html> for
|
||||||
|
details.*/
|
||||||
|
#if defined O_ASYNC && !defined __GNU__
|
||||||
|
#define HAVE_HUP_NOTIFICATION
|
||||||
|
#ifndef SIGPOLL
|
||||||
|
#define SIGPOLL SIGIO
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
|
||||||
|
static FdSource from(STDIN_FILENO);
|
||||||
|
static FdSink to(STDOUT_FILENO);
|
||||||
|
|
||||||
|
bool canSendStderr;
|
||||||
|
pid_t myPid;
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
/* This function is called anytime we want to write something to
|
||||||
|
stderr. If we're in a state where the protocol allows it (i.e.,
|
||||||
|
when canSendStderr), send the message to the client over the
|
||||||
|
socket. */
|
||||||
|
static void tunnelStderr(const unsigned char * buf, size_t count)
|
||||||
|
{
|
||||||
|
/* Don't send the message to the client if we're a child of the
|
||||||
|
process handling the connection. Otherwise we could screw up
|
||||||
|
the protocol. It's up to the parent to redirect stderr and
|
||||||
|
send it to the client somehow (e.g., as in build.cc). */
|
||||||
|
if (canSendStderr && myPid == getpid()) {
|
||||||
|
try {
|
||||||
|
writeInt(STDERR_NEXT, to);
|
||||||
|
writeString(buf, count, to);
|
||||||
|
to.flush();
|
||||||
|
} catch (...) {
|
||||||
|
/* Write failed; that means that the other side is
|
||||||
|
gone. */
|
||||||
|
canSendStderr = false;
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
} else
|
||||||
|
writeFull(STDERR_FILENO, buf, count);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* Return true if the remote side has closed its end of the
|
||||||
|
connection, false otherwise. Should not be called on any socket on
|
||||||
|
which we expect input! */
|
||||||
|
static bool isFarSideClosed(int socket)
|
||||||
|
{
|
||||||
|
struct timeval timeout;
|
||||||
|
timeout.tv_sec = timeout.tv_usec = 0;
|
||||||
|
|
||||||
|
fd_set fds;
|
||||||
|
FD_ZERO(&fds);
|
||||||
|
FD_SET(socket, &fds);
|
||||||
|
|
||||||
|
while (select(socket + 1, &fds, 0, 0, &timeout) == -1)
|
||||||
|
if (errno != EINTR) throw SysError("select()");
|
||||||
|
|
||||||
|
if (!FD_ISSET(socket, &fds)) return false;
|
||||||
|
|
||||||
|
/* Destructive read to determine whether the select() marked the
|
||||||
|
socket as readable because there is actual input or because
|
||||||
|
we've reached EOF (i.e., a read of size 0 is available). */
|
||||||
|
char c;
|
||||||
|
int rd;
|
||||||
|
if ((rd = read(socket, &c, 1)) > 0)
|
||||||
|
throw Error("EOF expected (protocol error?)");
|
||||||
|
else if (rd == -1 && errno != ECONNRESET)
|
||||||
|
throw SysError("expected connection reset or EOF");
|
||||||
|
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* A SIGPOLL signal is received when data is available on the client
|
||||||
|
communication socket, or when the client has closed its side of the
|
||||||
|
socket. This handler is enabled at precisely those moments in the
|
||||||
|
protocol when we're doing work and the client is supposed to be
|
||||||
|
quiet. Thus, if we get a SIGPOLL signal, it means that the client
|
||||||
|
has quit. So we should quit as well.
|
||||||
|
|
||||||
|
Too bad most operating systems don't support the POLL_HUP value for
|
||||||
|
si_code in siginfo_t. That would make most of the SIGPOLL
|
||||||
|
complexity unnecessary, i.e., we could just enable SIGPOLL all the
|
||||||
|
time and wouldn't have to worry about races. */
|
||||||
|
static void sigPollHandler(int sigNo)
|
||||||
|
{
|
||||||
|
using namespace std;
|
||||||
|
try {
|
||||||
|
/* Check that the far side actually closed. We're still
|
||||||
|
getting spurious signals every once in a while. I.e.,
|
||||||
|
there is no input available, but we get a signal with
|
||||||
|
POLL_IN set. Maybe it's delayed or something. */
|
||||||
|
if (isFarSideClosed(from.fd)) {
|
||||||
|
if (!blockInt) {
|
||||||
|
_isInterrupted = 1;
|
||||||
|
blockInt = 1;
|
||||||
|
canSendStderr = false;
|
||||||
|
const char * s = "SIGPOLL\n";
|
||||||
|
write(STDERR_FILENO, s, strlen(s));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
const char * s = "spurious SIGPOLL\n";
|
||||||
|
write(STDERR_FILENO, s, strlen(s));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
catch (Error & e) {
|
||||||
|
/* Shouldn't happen. */
|
||||||
|
string s = "impossible: " + e.msg() + '\n';
|
||||||
|
write(STDERR_FILENO, s.data(), s.size());
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void setSigPollAction(bool enable)
|
||||||
|
{
|
||||||
|
#ifdef HAVE_HUP_NOTIFICATION
|
||||||
|
struct sigaction act, oact;
|
||||||
|
act.sa_handler = enable ? sigPollHandler : SIG_IGN;
|
||||||
|
sigfillset(&act.sa_mask);
|
||||||
|
act.sa_flags = 0;
|
||||||
|
if (sigaction(SIGPOLL, &act, &oact))
|
||||||
|
throw SysError("setting handler for SIGPOLL");
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* startWork() means that we're starting an operation for which we
|
||||||
|
want to send out stderr to the client. */
|
||||||
|
static void startWork()
|
||||||
|
{
|
||||||
|
canSendStderr = true;
|
||||||
|
|
||||||
|
/* Handle client death asynchronously. */
|
||||||
|
setSigPollAction(true);
|
||||||
|
|
||||||
|
/* Of course, there is a race condition here: the socket could
|
||||||
|
have closed between when we last read from / wrote to it, and
|
||||||
|
between the time we set the handler for SIGPOLL. In that case
|
||||||
|
we won't get the signal. So do a non-blocking select() to find
|
||||||
|
out if any input is available on the socket. If there is, it
|
||||||
|
has to be the 0-byte read that indicates that the socket has
|
||||||
|
closed. */
|
||||||
|
if (isFarSideClosed(from.fd)) {
|
||||||
|
_isInterrupted = 1;
|
||||||
|
checkInterrupt();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
/* stopWork() means that we're done; stop sending stderr to the
|
||||||
|
client. */
|
||||||
|
static void stopWork(bool success = true, const string & msg = "", unsigned int status = 0)
|
||||||
|
{
|
||||||
|
/* Stop handling async client death; we're going to a state where
|
||||||
|
we're either sending or receiving from the client, so we'll be
|
||||||
|
notified of client death anyway. */
|
||||||
|
setSigPollAction(false);
|
||||||
|
|
||||||
|
canSendStderr = false;
|
||||||
|
|
||||||
|
if (success)
|
||||||
|
writeInt(STDERR_LAST, to);
|
||||||
|
else {
|
||||||
|
writeInt(STDERR_ERROR, to);
|
||||||
|
writeString(msg, to);
|
||||||
|
if (status != 0) writeInt(status, to);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
struct TunnelSink : Sink
|
||||||
|
{
|
||||||
|
Sink & to;
|
||||||
|
TunnelSink(Sink & to) : to(to) { }
|
||||||
|
virtual void operator () (const unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
writeInt(STDERR_WRITE, to);
|
||||||
|
writeString(data, len, to);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
struct TunnelSource : BufferedSource
|
||||||
|
{
|
||||||
|
Source & from;
|
||||||
|
TunnelSource(Source & from) : from(from) { }
|
||||||
|
size_t readUnbuffered(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
/* Careful: we're going to receive data from the client now,
|
||||||
|
so we have to disable the SIGPOLL handler. */
|
||||||
|
setSigPollAction(false);
|
||||||
|
canSendStderr = false;
|
||||||
|
|
||||||
|
writeInt(STDERR_READ, to);
|
||||||
|
writeInt(len, to);
|
||||||
|
to.flush();
|
||||||
|
size_t n = readString(data, len, from);
|
||||||
|
|
||||||
|
startWork();
|
||||||
|
if (n == 0) throw EndOfFile("unexpected end-of-file");
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* If the NAR archive contains a single file at top-level, then save
|
||||||
|
the contents of the file to `s'. Otherwise barf. */
|
||||||
|
struct RetrieveRegularNARSink : ParseSink
|
||||||
|
{
|
||||||
|
bool regular;
|
||||||
|
string s;
|
||||||
|
|
||||||
|
RetrieveRegularNARSink() : regular(true) { }
|
||||||
|
|
||||||
|
void createDirectory(const Path & path)
|
||||||
|
{
|
||||||
|
regular = false;
|
||||||
|
}
|
||||||
|
|
||||||
|
void receiveContents(unsigned char * data, unsigned int len)
|
||||||
|
{
|
||||||
|
s.append((const char *) data, len);
|
||||||
|
}
|
||||||
|
|
||||||
|
void createSymlink(const Path & path, const string & target)
|
||||||
|
{
|
||||||
|
regular = false;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
/* Adapter class of a Source that saves all data read to `s'. */
|
||||||
|
struct SavingSourceAdapter : Source
|
||||||
|
{
|
||||||
|
Source & orig;
|
||||||
|
string s;
|
||||||
|
SavingSourceAdapter(Source & orig) : orig(orig) { }
|
||||||
|
size_t read(unsigned char * data, size_t len)
|
||||||
|
{
|
||||||
|
size_t n = orig.read(data, len);
|
||||||
|
s.append((const char *) data, n);
|
||||||
|
return n;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
static void performOp(bool trusted, unsigned int clientVersion,
|
||||||
|
Source & from, Sink & to, unsigned int op)
|
||||||
|
{
|
||||||
|
switch (op) {
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
case wopQuit: {
|
||||||
|
/* Close the database. */
|
||||||
|
store.reset((StoreAPI *) 0);
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
case wopIsValidPath: {
|
||||||
|
/* 'readStorePath' could raise an error leading to the connection
|
||||||
|
being closed. To be able to recover from an invalid path error,
|
||||||
|
call 'startWork' early, and do 'assertStorePath' afterwards so
|
||||||
|
that the 'Error' exception handler doesn't close the
|
||||||
|
connection. */
|
||||||
|
Path path = readString(from);
|
||||||
|
startWork();
|
||||||
|
assertStorePath(path);
|
||||||
|
bool result = store->isValidPath(path);
|
||||||
|
stopWork();
|
||||||
|
writeInt(result, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryValidPaths: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
PathSet res = store->queryValidPaths(paths);
|
||||||
|
stopWork();
|
||||||
|
writeStrings(res, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopHasSubstitutes: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
PathSet res = store->querySubstitutablePaths(singleton<PathSet>(path));
|
||||||
|
stopWork();
|
||||||
|
writeInt(res.find(path) != res.end(), to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePaths: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
PathSet res = store->querySubstitutablePaths(paths);
|
||||||
|
stopWork();
|
||||||
|
writeStrings(res, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathHash: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
Hash hash = store->queryPathHash(path);
|
||||||
|
stopWork();
|
||||||
|
writeString(printHash(hash), to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryReferences:
|
||||||
|
case wopQueryReferrers:
|
||||||
|
case wopQueryValidDerivers:
|
||||||
|
case wopQueryDerivationOutputs: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
PathSet paths;
|
||||||
|
if (op == wopQueryReferences)
|
||||||
|
store->queryReferences(path, paths);
|
||||||
|
else if (op == wopQueryReferrers)
|
||||||
|
store->queryReferrers(path, paths);
|
||||||
|
else if (op == wopQueryValidDerivers)
|
||||||
|
paths = store->queryValidDerivers(path);
|
||||||
|
else paths = store->queryDerivationOutputs(path);
|
||||||
|
stopWork();
|
||||||
|
writeStrings(paths, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryDerivationOutputNames: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
StringSet names;
|
||||||
|
names = store->queryDerivationOutputNames(path);
|
||||||
|
stopWork();
|
||||||
|
writeStrings(names, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryDeriver: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
Path deriver = store->queryDeriver(path);
|
||||||
|
stopWork();
|
||||||
|
writeString(deriver, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathFromHashPart: {
|
||||||
|
string hashPart = readString(from);
|
||||||
|
startWork();
|
||||||
|
Path path = store->queryPathFromHashPart(hashPart);
|
||||||
|
stopWork();
|
||||||
|
writeString(path, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddToStore: {
|
||||||
|
string baseName = readString(from);
|
||||||
|
bool fixed = readInt(from) == 1; /* obsolete */
|
||||||
|
bool recursive = readInt(from) == 1;
|
||||||
|
string s = readString(from);
|
||||||
|
/* Compatibility hack. */
|
||||||
|
if (!fixed) {
|
||||||
|
s = "sha256";
|
||||||
|
recursive = true;
|
||||||
|
}
|
||||||
|
HashType hashAlgo = parseHashType(s);
|
||||||
|
|
||||||
|
SavingSourceAdapter savedNAR(from);
|
||||||
|
RetrieveRegularNARSink savedRegular;
|
||||||
|
|
||||||
|
if (recursive) {
|
||||||
|
/* Get the entire NAR dump from the client and save it to
|
||||||
|
a string so that we can pass it to
|
||||||
|
addToStoreFromDump(). */
|
||||||
|
ParseSink sink; /* null sink; just parse the NAR */
|
||||||
|
parseDump(sink, savedNAR);
|
||||||
|
} else
|
||||||
|
parseDump(savedRegular, from);
|
||||||
|
|
||||||
|
startWork();
|
||||||
|
if (!savedRegular.regular) throw Error("regular file expected");
|
||||||
|
Path path = dynamic_cast<LocalStore *>(store.get())
|
||||||
|
->addToStoreFromDump(recursive ? savedNAR.s : savedRegular.s, baseName, recursive, hashAlgo);
|
||||||
|
stopWork();
|
||||||
|
|
||||||
|
writeString(path, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddTextToStore: {
|
||||||
|
string suffix = readString(from);
|
||||||
|
string s = readString(from);
|
||||||
|
PathSet refs = readStorePaths<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
Path path = store->addTextToStore(suffix, s, refs);
|
||||||
|
stopWork();
|
||||||
|
writeString(path, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopExportPath: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
bool sign = readInt(from) == 1;
|
||||||
|
startWork();
|
||||||
|
TunnelSink sink(to);
|
||||||
|
store->exportPath(path, sign, sink);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopImportPaths: {
|
||||||
|
startWork();
|
||||||
|
TunnelSource source(from);
|
||||||
|
Paths paths = store->importPaths(true, source);
|
||||||
|
stopWork();
|
||||||
|
writeStrings(paths, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopBuildPaths: {
|
||||||
|
PathSet drvs = readStorePaths<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
store->buildPaths(drvs);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopEnsurePath: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
store->ensurePath(path);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddTempRoot: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
store->addTempRoot(path);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopAddIndirectRoot: {
|
||||||
|
Path path = absPath(readString(from));
|
||||||
|
startWork();
|
||||||
|
store->addIndirectRoot(path);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopSyncWithGC: {
|
||||||
|
startWork();
|
||||||
|
store->syncWithGC();
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopFindRoots: {
|
||||||
|
startWork();
|
||||||
|
Roots roots = store->findRoots();
|
||||||
|
stopWork();
|
||||||
|
writeInt(roots.size(), to);
|
||||||
|
for (Roots::iterator i = roots.begin(); i != roots.end(); ++i) {
|
||||||
|
writeString(i->first, to);
|
||||||
|
writeString(i->second, to);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopCollectGarbage: {
|
||||||
|
GCOptions options;
|
||||||
|
options.action = (GCOptions::GCAction) readInt(from);
|
||||||
|
options.pathsToDelete = readStorePaths<PathSet>(from);
|
||||||
|
options.ignoreLiveness = readInt(from);
|
||||||
|
options.maxFreed = readLongLong(from);
|
||||||
|
readInt(from); // obsolete field
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 5) {
|
||||||
|
/* removed options */
|
||||||
|
readInt(from);
|
||||||
|
readInt(from);
|
||||||
|
}
|
||||||
|
|
||||||
|
GCResults results;
|
||||||
|
|
||||||
|
startWork();
|
||||||
|
if (options.ignoreLiveness)
|
||||||
|
throw Error("you are not allowed to ignore liveness");
|
||||||
|
store->collectGarbage(options, results);
|
||||||
|
stopWork();
|
||||||
|
|
||||||
|
writeStrings(results.paths, to);
|
||||||
|
writeLongLong(results.bytesFreed, to);
|
||||||
|
writeLongLong(0, to); // obsolete
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopSetOptions: {
|
||||||
|
settings.keepFailed = readInt(from) != 0;
|
||||||
|
settings.keepGoing = readInt(from) != 0;
|
||||||
|
settings.set("build-fallback", readInt(from) ? "true" : "false");
|
||||||
|
verbosity = (Verbosity) readInt(from);
|
||||||
|
settings.set("build-max-jobs", int2String(readInt(from)));
|
||||||
|
settings.set("build-max-silent-time", int2String(readInt(from)));
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 2)
|
||||||
|
settings.useBuildHook = readInt(from) != 0;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 4) {
|
||||||
|
settings.buildVerbosity = (Verbosity) readInt(from);
|
||||||
|
logType = (LogType) readInt(from);
|
||||||
|
settings.printBuildTrace = readInt(from) != 0;
|
||||||
|
}
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 6)
|
||||||
|
settings.set("build-cores", int2String(readInt(from)));
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 10)
|
||||||
|
settings.set("build-use-substitutes", readInt(from) ? "true" : "false");
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 12) {
|
||||||
|
unsigned int n = readInt(from);
|
||||||
|
for (unsigned int i = 0; i < n; i++) {
|
||||||
|
string name = readString(from);
|
||||||
|
string value = readString(from);
|
||||||
|
if (name == "build-timeout" || name == "use-ssh-substituter")
|
||||||
|
settings.set(name, value);
|
||||||
|
else
|
||||||
|
settings.set(trusted ? name : "untrusted-" + name, value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
settings.update();
|
||||||
|
startWork();
|
||||||
|
stopWork();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePathInfo: {
|
||||||
|
Path path = absPath(readString(from));
|
||||||
|
startWork();
|
||||||
|
SubstitutablePathInfos infos;
|
||||||
|
store->querySubstitutablePathInfos(singleton<PathSet>(path), infos);
|
||||||
|
stopWork();
|
||||||
|
SubstitutablePathInfos::iterator i = infos.find(path);
|
||||||
|
if (i == infos.end())
|
||||||
|
writeInt(0, to);
|
||||||
|
else {
|
||||||
|
writeInt(1, to);
|
||||||
|
writeString(i->second.deriver, to);
|
||||||
|
writeStrings(i->second.references, to);
|
||||||
|
writeLongLong(i->second.downloadSize, to);
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 7)
|
||||||
|
writeLongLong(i->second.narSize, to);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQuerySubstitutablePathInfos: {
|
||||||
|
PathSet paths = readStorePaths<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
SubstitutablePathInfos infos;
|
||||||
|
store->querySubstitutablePathInfos(paths, infos);
|
||||||
|
stopWork();
|
||||||
|
writeInt(infos.size(), to);
|
||||||
|
foreach (SubstitutablePathInfos::iterator, i, infos) {
|
||||||
|
writeString(i->first, to);
|
||||||
|
writeString(i->second.deriver, to);
|
||||||
|
writeStrings(i->second.references, to);
|
||||||
|
writeLongLong(i->second.downloadSize, to);
|
||||||
|
writeLongLong(i->second.narSize, to);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryAllValidPaths: {
|
||||||
|
startWork();
|
||||||
|
PathSet paths = store->queryAllValidPaths();
|
||||||
|
stopWork();
|
||||||
|
writeStrings(paths, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryFailedPaths: {
|
||||||
|
startWork();
|
||||||
|
PathSet paths = store->queryFailedPaths();
|
||||||
|
stopWork();
|
||||||
|
writeStrings(paths, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopClearFailedPaths: {
|
||||||
|
PathSet paths = readStrings<PathSet>(from);
|
||||||
|
startWork();
|
||||||
|
store->clearFailedPaths(paths);
|
||||||
|
stopWork();
|
||||||
|
writeInt(1, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
case wopQueryPathInfo: {
|
||||||
|
Path path = readStorePath(from);
|
||||||
|
startWork();
|
||||||
|
ValidPathInfo info = store->queryPathInfo(path);
|
||||||
|
stopWork();
|
||||||
|
writeString(info.deriver, to);
|
||||||
|
writeString(printHash(info.hash), to);
|
||||||
|
writeStrings(info.references, to);
|
||||||
|
writeInt(info.registrationTime, to);
|
||||||
|
writeLongLong(info.narSize, to);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
throw Error(format("invalid operation %1%") % op);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void processConnection(bool trusted)
|
||||||
|
{
|
||||||
|
canSendStderr = false;
|
||||||
|
myPid = getpid();
|
||||||
|
_writeToStderr = tunnelStderr;
|
||||||
|
|
||||||
|
#ifdef HAVE_HUP_NOTIFICATION
|
||||||
|
/* Allow us to receive SIGPOLL for events on the client socket. */
|
||||||
|
setSigPollAction(false);
|
||||||
|
if (fcntl(from.fd, F_SETOWN, getpid()) == -1)
|
||||||
|
throw SysError("F_SETOWN");
|
||||||
|
if (fcntl(from.fd, F_SETFL, fcntl(from.fd, F_GETFL, 0) | O_ASYNC) == -1)
|
||||||
|
throw SysError("F_SETFL");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Exchange the greeting. */
|
||||||
|
unsigned int magic = readInt(from);
|
||||||
|
if (magic != WORKER_MAGIC_1) throw Error("protocol mismatch");
|
||||||
|
writeInt(WORKER_MAGIC_2, to);
|
||||||
|
writeInt(PROTOCOL_VERSION, to);
|
||||||
|
to.flush();
|
||||||
|
unsigned int clientVersion = readInt(from);
|
||||||
|
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 14 && readInt(from))
|
||||||
|
setAffinityTo(readInt(from));
|
||||||
|
|
||||||
|
bool reserveSpace = true;
|
||||||
|
if (GET_PROTOCOL_MINOR(clientVersion) >= 11)
|
||||||
|
reserveSpace = readInt(from) != 0;
|
||||||
|
|
||||||
|
/* Send startup error messages to the client. */
|
||||||
|
startWork();
|
||||||
|
|
||||||
|
try {
|
||||||
|
|
||||||
|
/* If we can't accept clientVersion, then throw an error
|
||||||
|
*here* (not above). */
|
||||||
|
|
||||||
|
#if 0
|
||||||
|
/* Prevent users from doing something very dangerous. */
|
||||||
|
if (geteuid() == 0 &&
|
||||||
|
querySetting("build-users-group", "") == "")
|
||||||
|
throw Error("if you run `nix-daemon' as root, then you MUST set `build-users-group'!");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/* Open the store. */
|
||||||
|
store = std::shared_ptr<StoreAPI>(new LocalStore(reserveSpace));
|
||||||
|
|
||||||
|
stopWork();
|
||||||
|
to.flush();
|
||||||
|
|
||||||
|
} catch (Error & e) {
|
||||||
|
stopWork(false, e.msg());
|
||||||
|
to.flush();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Process client requests. */
|
||||||
|
unsigned int opCount = 0;
|
||||||
|
|
||||||
|
while (true) {
|
||||||
|
WorkerOp op;
|
||||||
|
try {
|
||||||
|
op = (WorkerOp) readInt(from);
|
||||||
|
} catch (EndOfFile & e) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
opCount++;
|
||||||
|
|
||||||
|
try {
|
||||||
|
performOp(trusted, clientVersion, from, to, op);
|
||||||
|
} catch (Error & e) {
|
||||||
|
/* If we're not in a state where we can send replies, then
|
||||||
|
something went wrong processing the input of the
|
||||||
|
client. This can happen especially if I/O errors occur
|
||||||
|
during addTextToStore() / importPath(). If that
|
||||||
|
happens, just send the error message and exit. */
|
||||||
|
bool errorAllowed = canSendStderr;
|
||||||
|
if (!errorAllowed) printMsg(lvlError, format("error processing client input: %1%") % e.msg());
|
||||||
|
stopWork(false, e.msg(), GET_PROTOCOL_MINOR(clientVersion) >= 8 ? e.status : 0);
|
||||||
|
if (!errorAllowed) break;
|
||||||
|
} catch (std::bad_alloc & e) {
|
||||||
|
if (canSendStderr)
|
||||||
|
stopWork(false, "Nix daemon out of memory", GET_PROTOCOL_MINOR(clientVersion) >= 8 ? 1 : 0);
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
|
||||||
|
to.flush();
|
||||||
|
|
||||||
|
assert(!canSendStderr);
|
||||||
|
};
|
||||||
|
|
||||||
|
printMsg(lvlError, format("%1% operations") % opCount);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void sigChldHandler(int sigNo)
|
||||||
|
{
|
||||||
|
/* Reap all dead children. */
|
||||||
|
while (waitpid(-1, 0, WNOHANG) > 0) ;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void setSigChldAction(bool autoReap)
|
||||||
|
{
|
||||||
|
struct sigaction act, oact;
|
||||||
|
act.sa_handler = autoReap ? sigChldHandler : SIG_DFL;
|
||||||
|
sigfillset(&act.sa_mask);
|
||||||
|
act.sa_flags = 0;
|
||||||
|
if (sigaction(SIGCHLD, &act, &oact))
|
||||||
|
throw SysError("setting SIGCHLD handler");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#define SD_LISTEN_FDS_START 3
|
||||||
|
|
||||||
|
|
||||||
|
static void daemonLoop()
|
||||||
|
{
|
||||||
|
/* Get rid of children automatically; don't let them become
|
||||||
|
zombies. */
|
||||||
|
setSigChldAction(true);
|
||||||
|
|
||||||
|
AutoCloseFD fdSocket;
|
||||||
|
|
||||||
|
/* Handle socket-based activation by systemd. */
|
||||||
|
if (getEnv("LISTEN_FDS") != "") {
|
||||||
|
if (getEnv("LISTEN_PID") != int2String(getpid()) || getEnv("LISTEN_FDS") != "1")
|
||||||
|
throw Error("unexpected systemd environment variables");
|
||||||
|
fdSocket = SD_LISTEN_FDS_START;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Otherwise, create and bind to a Unix domain socket. */
|
||||||
|
else {
|
||||||
|
|
||||||
|
/* Create and bind to a Unix domain socket. */
|
||||||
|
fdSocket = socket(PF_UNIX, SOCK_STREAM, 0);
|
||||||
|
if (fdSocket == -1)
|
||||||
|
throw SysError("cannot create Unix domain socket");
|
||||||
|
|
||||||
|
string socketPath = settings.nixDaemonSocketFile;
|
||||||
|
|
||||||
|
createDirs(dirOf(socketPath));
|
||||||
|
|
||||||
|
/* Urgh, sockaddr_un allows path names of only 108 characters.
|
||||||
|
So chdir to the socket directory so that we can pass a
|
||||||
|
relative path name. */
|
||||||
|
chdir(dirOf(socketPath).c_str());
|
||||||
|
Path socketPathRel = "./" + baseNameOf(socketPath);
|
||||||
|
|
||||||
|
struct sockaddr_un addr;
|
||||||
|
addr.sun_family = AF_UNIX;
|
||||||
|
if (socketPathRel.size() >= sizeof(addr.sun_path))
|
||||||
|
throw Error(format("socket path `%1%' is too long") % socketPathRel);
|
||||||
|
strcpy(addr.sun_path, socketPathRel.c_str());
|
||||||
|
|
||||||
|
unlink(socketPath.c_str());
|
||||||
|
|
||||||
|
/* Make sure that the socket is created with 0666 permission
|
||||||
|
(everybody can connect --- provided they have access to the
|
||||||
|
directory containing the socket). */
|
||||||
|
mode_t oldMode = umask(0111);
|
||||||
|
int res = bind(fdSocket, (struct sockaddr *) &addr, sizeof(addr));
|
||||||
|
umask(oldMode);
|
||||||
|
if (res == -1)
|
||||||
|
throw SysError(format("cannot bind to socket `%1%'") % socketPath);
|
||||||
|
|
||||||
|
chdir("/"); /* back to the root */
|
||||||
|
|
||||||
|
if (listen(fdSocket, 5) == -1)
|
||||||
|
throw SysError(format("cannot listen on socket `%1%'") % socketPath);
|
||||||
|
}
|
||||||
|
|
||||||
|
closeOnExec(fdSocket);
|
||||||
|
|
||||||
|
/* Loop accepting connections. */
|
||||||
|
while (1) {
|
||||||
|
|
||||||
|
try {
|
||||||
|
/* Important: the server process *cannot* open the SQLite
|
||||||
|
database, because it doesn't like forks very much. */
|
||||||
|
assert(!store);
|
||||||
|
|
||||||
|
/* Accept a connection. */
|
||||||
|
struct sockaddr_un remoteAddr;
|
||||||
|
socklen_t remoteAddrLen = sizeof(remoteAddr);
|
||||||
|
|
||||||
|
AutoCloseFD remote = accept(fdSocket,
|
||||||
|
(struct sockaddr *) &remoteAddr, &remoteAddrLen);
|
||||||
|
checkInterrupt();
|
||||||
|
if (remote == -1) {
|
||||||
|
if (errno == EINTR)
|
||||||
|
continue;
|
||||||
|
else
|
||||||
|
throw SysError("accepting connection");
|
||||||
|
}
|
||||||
|
|
||||||
|
closeOnExec(remote);
|
||||||
|
|
||||||
|
/* Get the identity of the caller, if possible. */
|
||||||
|
uid_t clientUid = -1;
|
||||||
|
pid_t clientPid = -1;
|
||||||
|
bool trusted = false;
|
||||||
|
|
||||||
|
#if defined(SO_PEERCRED)
|
||||||
|
ucred cred;
|
||||||
|
socklen_t credLen = sizeof(cred);
|
||||||
|
if (getsockopt(remote, SOL_SOCKET, SO_PEERCRED, &cred, &credLen) != -1) {
|
||||||
|
clientPid = cred.pid;
|
||||||
|
clientUid = cred.uid;
|
||||||
|
if (clientUid == 0) trusted = true;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
printMsg(lvlInfo, format("accepted connection from pid %1%, uid %2%") % clientPid % clientUid);
|
||||||
|
|
||||||
|
/* Fork a child to handle the connection. */
|
||||||
|
pid_t child;
|
||||||
|
child = fork();
|
||||||
|
|
||||||
|
switch (child) {
|
||||||
|
|
||||||
|
case -1:
|
||||||
|
throw SysError("unable to fork");
|
||||||
|
|
||||||
|
case 0:
|
||||||
|
try { /* child */
|
||||||
|
|
||||||
|
/* Background the daemon. */
|
||||||
|
if (setsid() == -1)
|
||||||
|
throw SysError(format("creating a new session"));
|
||||||
|
|
||||||
|
/* Restore normal handling of SIGCHLD. */
|
||||||
|
setSigChldAction(false);
|
||||||
|
|
||||||
|
/* For debugging, stuff the pid into argv[1]. */
|
||||||
|
if (clientPid != -1 && argvSaved[1]) {
|
||||||
|
string processName = int2String(clientPid);
|
||||||
|
strncpy(argvSaved[1], processName.c_str(), strlen(argvSaved[1]));
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Handle the connection. */
|
||||||
|
from.fd = remote;
|
||||||
|
to.fd = remote;
|
||||||
|
processConnection(trusted);
|
||||||
|
|
||||||
|
} catch (std::exception & e) {
|
||||||
|
writeToStderr("unexpected Nix daemon error: " + string(e.what()) + "\n");
|
||||||
|
}
|
||||||
|
exit(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
} catch (Interrupted & e) {
|
||||||
|
throw;
|
||||||
|
} catch (Error & e) {
|
||||||
|
printMsg(lvlError, format("error processing connection: %1%") % e.msg());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void run(Strings args)
|
||||||
|
{
|
||||||
|
for (Strings::iterator i = args.begin(); i != args.end(); ) {
|
||||||
|
string arg = *i++;
|
||||||
|
if (arg == "--daemon") /* ignored for backwards compatibility */;
|
||||||
|
}
|
||||||
|
|
||||||
|
chdir("/");
|
||||||
|
daemonLoop();
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
void printHelp()
|
||||||
|
{
|
||||||
|
showManPage("nix-daemon");
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
string programId = "nix-daemon";
|
|
@ -1,89 +0,0 @@
#!/bin/sh
# GNU Guix --- Functional package management for GNU
# Copyright © 2012, 2013, 2014 Ludovic Courtès <ludo@gnu.org>
#
# This file is part of GNU Guix.
#
# GNU Guix is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or (at
# your option) any later version.
#
# GNU Guix is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Guix.  If not, see <http://www.gnu.org/licenses/>.

#
# Update the local copy of Nix source code needed to build the daemon.
# Assume GNU Coreutils and Git are available.
#

top_srcdir="${top_srcdir:-..}"

log()
{
    echo "sync-with-upstream: $@" >&2
}

# checked_in_p FILE
checked_in_p()
{
    ( cd "$top_srcdir" ;
      git ls-tree HEAD -- "nix/$1" | grep "$1" > /dev/null )
}

if [ ! -d "$top_srcdir/build-aux" ]
then
    log "\`$top_srcdir' is not the valid top-level source directory"
    exit 1
fi

set -e
for upstream_file in `cd "$top_srcdir/nix-upstream/src" ;
     find . -name \*.c -or -name \*.h -or -name \*.cc -or -name \*.hh \
        -or -name \*.cpp -or -name \*.hpp -or -name \*.sql`
do
    if grep "$upstream_file" "$top_srcdir/daemon.am" > /dev/null
    then
        if checked_in_p "$upstream_file"
        then
            log "skipping \`$upstream_file', which has a checked-in copy"
        else
            ( cd "$top_srcdir/nix-upstream/src" && \
              cp -v --parents "$upstream_file" ../../nix )
        fi
    else
        log "skipping \`$upstream_file', which is not used"
    fi
done

# This file should be generated by our build system so remove it.
rm -fv "$top_srcdir/nix/libstore/schema.sql.hh"

cp -v "$top_srcdir/nix-upstream/COPYING" "$top_srcdir/nix"

# Generate an 'AUTHORS' file since upstream Nix no longer has one.
cat > "$top_srcdir/nix/AUTHORS" <<EOF
Most of the code is this directory was written by the following people for
the Nix project (http://nixos.org/nix).  Thank you!

EOF
( cd "$top_srcdir/nix-upstream" ; git shortlog --summary ) \
    | sed -'es/^ *[0-9]\+\(.*\)/  \1/g' \
    >> "$top_srcdir/nix/AUTHORS"

# Substitutions.
sed -i "$top_srcdir/nix/libstore/gc.cc" \
    -e 's|/nix/find-runtime-roots\.pl|/guix/list-runtime-roots|g'

# Our 'guix_hash_context' structure has a copy constructor, specifically to
# handle the use case in 'HashSink::currentHash()' where the copy of the
# context is expected to truly copy the underlying hash context.  The copy
# constructor cannot be used in 'Ctx' if that's a union, so turn it into a
# structure (we can afford to two wasted words.)
sed -i "$top_srcdir/nix/libutil/hash.cc" "$top_srcdir/nix/libutil/hash.hh" \
    -e 's|union Ctx|struct Ctx|g'
@ -5,6 +5,7 @@ de
 en@boldquot
 en@quot
 eo
+fr
 hu
 pt_BR
 sr

File diff suppressed because it is too large
@ -4,6 +4,7 @@ de
 en@boldquot
 en@quot
 eo
+fr
 hu
 pt_BR
 sr

File diff suppressed because it is too large
@ -35,6 +35,11 @@
 (define %store
   (open-connection-for-tests))

+(define-syntax-rule (test-assertm name exp)
+  (test-assert name
+    (run-with-store %store exp
+      #:guile-for-build (%guile-for-build))))
+
 ;; Example manifest entries.

 (define guile-1.8.8

@ -156,19 +161,18 @@
     (equal? (list glibc) install)
     (equal? (list (cons guile-1.8.8 guile-2.0.9)) upgrade)))))

-(test-assert "profile-derivation"
-  (run-with-store %store
-    (mlet* %store-monad
-        ((entry -> (package->manifest-entry %bootstrap-guile))
-         (guile (package->derivation %bootstrap-guile))
-         (drv (profile-derivation (manifest (list entry))
-                                  #:info-dir? #f))
-         (profile -> (derivation->output-path drv))
-         (bindir -> (string-append profile "/bin"))
-         (_ (built-derivations (list drv))))
-      (return (and (file-exists? (string-append bindir "/guile"))
-                   (string=? (dirname (readlink bindir))
-                             (derivation->output-path guile))))))
+(test-assertm "profile-derivation"
+  (mlet* %store-monad
+      ((entry -> (package->manifest-entry %bootstrap-guile))
+       (guile (package->derivation %bootstrap-guile))
+       (drv (profile-derivation (manifest (list entry))
+                                #:info-dir? #f))
+       (profile -> (derivation->output-path drv))
+       (bindir -> (string-append profile "/bin"))
+       (_ (built-derivations (list drv))))
+    (return (and (file-exists? (string-append bindir "/guile"))
+                 (string=? (dirname (readlink bindir))
+                           (derivation->output-path guile)))))))

 (test-end "profiles")
@ -74,7 +74,7 @@
     (lset<= string=? names (all-network-interfaces)))))

 (test-assert "network-interface-flags"
-  (let* ((sock (socket SOCK_STREAM AF_INET 0))
+  (let* ((sock (socket AF_INET SOCK_STREAM 0))
         (flags (network-interface-flags sock "lo")))
     (close-port sock)
     (and (not (zero? (logand flags IFF_LOOPBACK)))

@ -90,6 +90,38 @@
     (lambda args
       (system-error-errno args)))))

+(test-skip (if (zero? (getuid)) 1 0))
+(test-equal "set-network-interface-flags"
+  EPERM
+  (let ((sock (socket AF_INET SOCK_STREAM 0)))
+    (catch 'system-error
+      (lambda ()
+        (set-network-interface-flags sock "lo" IFF_UP))
+      (lambda args
+        (close-port sock)
+        (system-error-errno args)))))
+
+(test-equal "network-interface-address lo"
+  (make-socket-address AF_INET (inet-pton AF_INET "127.0.0.1") 0)
+  (let* ((sock (socket AF_INET SOCK_STREAM 0))
+         (addr (network-interface-address sock "lo")))
+    (close-port sock)
+    addr))
+
+(test-equal "set-network-interface-address"
+  EPERM
+  (let ((sock (socket AF_INET SOCK_STREAM 0)))
+    (catch 'system-error
+      (lambda ()
+        (set-network-interface-address sock "nonexistent"
+                                       (make-socket-address
+                                        AF_INET
+                                        (inet-pton AF_INET "127.12.14.15")
+                                        0)))
+      (lambda args
+        (close-port sock)
+        (system-error-errno args)))))
+
 (test-end)