summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.ycm_extra_conf.py15
-rw-r--r--AUTHORS1078
-rw-r--r--LICENSE14
-rw-r--r--README.md16
-rw-r--r--aclocal.m427
-rw-r--r--build/autoconf/config.guess1454
-rw-r--r--build/autoconf/mozconfig-find76
-rw-r--r--build/autoconf/mozconfig2client-mk76
-rw-r--r--build/dumbmake-dependencies0
-rw-r--r--build/mach_bootstrap.py14
-rw-r--r--build/pymake/make.py40
-rw-r--r--build/pypng/check-sync-exceptions3
-rw-r--r--build/pypng/exnumpy.py128
-rw-r--r--build/pypng/iccp.py537
-rw-r--r--build/pypng/mkiccp.py45
-rw-r--r--build/pypng/pdsimgtopng99
-rw-r--r--build/pypng/pipasgrey73
-rw-r--r--build/pypng/pipcat44
-rw-r--r--build/pypng/pipcolours56
-rw-r--r--build/pypng/pipcomposite121
-rw-r--r--build/pypng/pipdither181
-rw-r--r--build/pypng/piprgb36
-rw-r--r--build/pypng/pipscalez53
-rw-r--r--build/pypng/pipstack127
-rw-r--r--build/pypng/pipwindow67
-rw-r--r--build/pypng/plan9topng.py293
-rw-r--r--build/pypng/pngchunk172
-rw-r--r--build/pypng/pnghist79
-rw-r--r--build/pypng/pnglsch31
-rw-r--r--build/pypng/texttopng151
-rw-r--r--client.mk466
-rw-r--r--config/baseconfig.mk16
-rw-r--r--config/config.mk7
-rw-r--r--config/configobj.py2279
-rw-r--r--config/makefiles/autotargets.mk94
-rw-r--r--config/makefiles/makeutils.mk117
-rw-r--r--config/printconfigsetting.py25
-rw-r--r--config/recurse.mk9
-rw-r--r--config/rules.mk13
-rw-r--r--configure.in38
-rw-r--r--configure.py32
-rw-r--r--mach4
-rw-r--r--moz.build7
-rw-r--r--moz.configure7
44 files changed, 8220 insertions, 0 deletions
diff --git a/.ycm_extra_conf.py b/.ycm_extra_conf.py
new file mode 100644
index 0000000..05fb579
--- /dev/null
+++ b/.ycm_extra_conf.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import imp, os, sys
+
+old_bytecode = sys.dont_write_bytecode
+sys.dont_write_bytecode = True
+
+ycm_module = imp.load_source("_ycm_extra_conf", os.path.join("mozilla", ".ycm_extra_conf.py"))
+
+sys.dont_write_bytecode = old_bytecode
+
+# Expose the FlagsForFile function from mozilla/.ycm_extra_conf.py
+FlagsForFile = ycm_module.FlagsForFile
diff --git a/AUTHORS b/AUTHORS
new file mode 100644
index 0000000..2c8a12a
--- /dev/null
+++ b/AUTHORS
@@ -0,0 +1,1078 @@
+This is an (incomplete) list of people who have contributed to the
+codebase which lives in this repository. If you make a contribution
+here, you may add your name and, optionally, email address in the
+appropriate place.
+
+For a full list of the people who are credited with making a
+contribution to Mozilla, see http://www.mozilla.org/credits/
+
+This list contains additional credits for people who have contributed
+to this Mozilla fork and who are not listed on the above-mentioned
+credits page. Both resources should be consulted when assessing
+full credit for the code base which lives in this repository.
+
+<1010mozilla@Ostermiller.com>
+Aaron Boodman <aa@google.com>
+Aaron Kaluszka <ask@swva.net>
+Aaron Leventhal <aaronleventhal@moonset.net>
+Aaron Nowack <anowack@mimiru.net>
+Aaron Reed <aaronr@us.ibm.com>
+Aaron Spangler <aaron@spangler.ods.org>
+Aaron Train <aaron.train@gmail.com>
+Abdelrhman Ahmed <a.ahmed1026@gmail.com>
+Achim Hasenmueller <achimha@innotek.de>
+ActiveState Tool Corp.
+Adam Barth <hk9565@gmail.com>
+Adam Christian <adam.christian@gmail.com>
+Adam Hauner
+Adam Lock <adamlock@netscape.com>
+Adam L. Peller
+Adam Souzis <adam@souzis.com>
+Aditya Rao <adicoolrao@gmail.com>
+Adobe Systems Incorporated
+Adrian Havill <havill@redhat.com>
+Adrian Herscu <bmf1972@icqmail.com>
+Adrian Johnson <ajohnson@redneon.com>
+Adrian Kalla <akalla@aviary.pl>
+Adrian Klein <dragosan@dragosan.net>
+a-higuti
+Aiko
+Akhil Arora <akhil.arora@sun.com>
+Akkana Peck <akkana@netscape.com>
+Alden D'Souza <aldenfloyd@gmail.com>
+Alec Flett <alecf@flett.org>
+Aleksey Chernoraenko <archer@meta-comm.com>
+Aleksey Nogin <ayn2@cornell.edu>
+Aleks Totic <a@totic.org>
+Alexander Law <1@1o.ru>
+Alexander Surkov <surkov.alexander@gmail.com>
+Alexey Chernyak <alexeyc@bigfoot.com>
+Alex Fritze <alex@croczilla.com>
+Alex Miller <amiller@mozilla.com>
+Alex Musil
+Alex Pakhotin <alexp@mozilla.com>
+Alex Russell
+Alex Vincent <ajvincent@gmail.com>
+Alfred Kayser <alfredkayser@nl.ibm.com>
+Alfred Peng <alfred.peng@sun.com>
+Ali Juma <ajuma@mozilla.com>
+Allan Beaufour <allan@beaufour.dk>
+Allen Eubank <adeubank@gmail.com>
+Allison Naaktgeboren <ally@mozilla.com>
+Alon Zakai <azakai@mozilla.com>
+Amir Szekely <kichik@gmail.com>
+Anant Narayanan <anant@kix.in>
+An-Cheng Huang <pach@cs.cmu.edu>
+Anders Hammarquist
+Andras Timar
+Andrea Canciani <ranma42@gmail.com>
+Andreas Gal <gal@uci.edu>
+Andreas M. Schneider <clarence@clarence.de>
+Andreas Otte <andreas.otte@debitel.net>
+Andrei Saprykin
+Andrei Volkov <av@netscape.com>
+Andrew Drake <drakedevel@gmail.com>
+Andrew Halberstadt <halbersa@gmail.com>
+Andrew Huntwork <ash@huntwork.net>
+Andrew Schultz <ajschult@verizon.net>
+Andrew Shilliday <andrewshilliday@gmail.com>
+Andrew Shultz
+Andrew Smith
+Andrew Sutherland <asutherland@asutherland.org>
+Andrew Thompson
+Andrew Zabolotny
+Andrzej Skalski <askalski@mozilla.com>
+Annie Sullivan <annie.sullivan@gmail.com>
+Anoop Saldanha <poonaatsoc@gmail.com>
+antonglv
+Antti Järvelin <antti.i.jarvelin@gmail.com>
+Arkady Blyakher <rkadyb@mit.edu>
+Armen Zambrano Gasparnian <armenzg@mozilla.com>
+Arno Renevier
+Arpad Borsos <arpad.borsos@googlemail.com>
+Arron Mogge <paper@animecity.nu>
+Arthur Wiebe <artooro@gmail.com>
+Asaf Romano <mozilla.mano@sent.com>
+Asko Tontti <atontti@cc.hut.fi>
+Atul Apte <aapte135@gmail.com>
+Atul Varma <atul@mozilla.com>
+Axel Hecht <axel@pike.org>
+Aza Raskin <aza@mozilla.com>
+Bart <dev@bartbizon.com>
+Bas Schouten <bschouten@mozilla.com>
+Bastiaan Jacques <b.jacques@planet.nl>
+<bedney@technicalpursuit.com>
+Behdad Esfahbod <behdad@behdad.org>
+Behnam Esfahbod <behnam@zwnj.org>
+Ben Basson <contact@cusser.net>
+Ben Bucksch <mozilla@bucksch.org>
+Ben Combee <bcombee@mozilla.com>
+Bencsath Boldizsar
+Benedict Hsieh <bhsieh@mozilla.com>
+Ben Goodger <ben@bengoodger.com>
+Ben Hearsum <bhearsum@mozilla.com>
+Ben Hsieh <ben.hsieh@gmail.com>
+Benjamin Frisch <bfrisch@gmail.com>
+Benjamin Otte <otte@gnome.org>
+Benjamin Smedberg <benjamin@smedbergs.us>
+Benjamin Stover <bstover@mozilla.com>
+Ben Newman <bnewman@mozilla.com>
+Benoit Girard <b56girard@gmail.com>
+Benoit Jacob <bjacob@mozilla.com>
+Ben Turner <bent.mozilla@gmail.com>
+Bertrand Le Roy
+bex@xaotec.com
+Biju
+Bill Haneman (bill.haneman@sun.com)
+Bill Law <law@netscape.com>
+Bill Worley
+Biro Arpad
+Bjarne Geir Herland <bjarne@runitsoft.com>
+Björn Jacke <bjoern.jacke@gmx.de>
+Blair McBride <bmcbride@mozilla.com>
+Blake Kaplan <mrbkap@gmail.com>
+Blake Ross <blake@blakeross.com>
+Blue Static
+Bobby Holley <bobbyholley@gmail.com>
+Bob Clary <bob@bclary.com>
+bobj@netscape.com
+Bob Lord <lord@netscape.com>
+Bob Miller <kbob@oblix.com>
+Bob Moss <bmoss@mozilla.com>
+Bob Relyea <rrelyea@redhat.com>
+Bodo Moeller <moeller@cdc.informatik.tu-darmstadt.de>
+bogomip
+Bolian Yin <bolian.yin@sun.com>
+Boris Zbarsky <bzbarsky@mit.edu>
+Brad Lassey <brad@lassey.us>
+Bradley Baetz <bbaetz@acm.org>
+Brad Taylor <brad@getcoded.net>
+Bram Moolenaar
+Brandon Pung <brandonpung@gmail.com>
+Brandon Sterne <bsterne@mozilla.com>
+Brant Gurganus <brantgurganus2001@cherokeescouting.org>
+Braun <ebraun@o2.pl>
+Brendan Eich <brendan@mozilla.org>
+Brett Wilson <brettw@gmail.com>
+Brian Birtles <birtles@gmail.com>
+Brian Bober <netdemonz@yahoo.com>
+Brian Crowder <crowderbt@gmail.com>
+briang@tonic.com
+Brian Hackett <bhackett@mozilla.com>
+Brian Lu <brian.lu@sun.com>
+Brian Nesse <bnesse@netscape.com>
+Brian Nicholson <bnicholson@mozilla.com>
+Brian O'Keefe <bokeefe@alum.wpi.edu>
+Brian R. Bondy <netzen@gmail.com>
+Brian Ryner <bryner@brianryner.com>
+Brian Smith <bsmith@mozilla.com>
+Brian Stell <bstell@ix.netcom.com>
+Brijesh Patel <brijesh3105@gmail.com>
+Brodie Thiesfield <brofield@jellycan.com>
+Bruce Hoult
+Bruno Haible <bruno@clisp.org>
+Bryant Chen
+Calum Robinson <calumr@mac.com>
+CanadianGuy
+Canonical Ltd
+Caolan McNamara <cmc@openoffice.org>
+Carl D. Worth <cworth@cworth.org>
+Carlo Alberto Ferraris <cafxx@strayorange.com>
+Carsten Book
+Catalin Patulea
+Cavin Song <cavin@netscape.com>
+Cedric Vivier <cedricv@neonux.com>
+Celso Aguiar <celsoaguiar@hotmail.com>
+Chak Nanga <chak@netscape.com>
+Chao-ying Fu <fu@mips.com>
+Charles Manske <cmanske@netscape.com>
+Charles Verdon
+Chase Phillips <chase@mozilla.org>
+Chase Tingley <tingley@sundell.net>
+Chenxia Liu <liuche@mozilla.com>
+Chip Clark <chipc@netscape.com>
+Chookij Vanatham <Chookij.Vanatham@Eng.Sun.COM>
+Chris AtLee <catlee@mozilla.com>
+Chris Beard <cbeard@mozilla.com>
+Chris Coulson <chris.coulson@canonical.com>
+Chris Double <chris.double@double.co.nz>
+Chris Evans
+Chris Halls
+Chris Jones <jones.chris.g@gmail.com>
+Chris Leary <cdleary@mozilla.com>
+Chris Lord <chrislord.net@gmail.com>
+Chris McAfee <mcafee@netscape.com>
+Chris Pearce <chris@pearce.org.nz>
+Chris Saari <saari@netscape.com>
+Chris Seawood <cls@seawood.org>
+Christian Biesinger <cbiesinger@gmail.com>
+Christian Bodart <chris@bodar.com>
+Christian Schneider <c.schneider@scram.de>
+christine@netscape.com
+Christopher A. Aillon <christopher@aillon.com>
+Christopher Blizzard <blizzard@mozilla.org>
+Christopher Davis <chrisd@torproject.org>
+Christopher Nebergall <cneberg@sandia.gov>
+Christopher Thomas <cst@yecc.com>
+Chris Torek <torek@bsdi.com>
+Chris Waterson <waterson@netscape.com>
+Chris Wilson <chris@chris-wilson.co.uk>
+<chwu@nortelnetworks.com>
+Claudio Ciccani <klan@users.sf.net>
+Clayton Williams <claytonw@mit.edu>
+Clint Talbert <cmtalbert@gmail.com>
+Colin Barrett <cbarrett@mozilla.com>
+Colin Blake <colin@theblakes.com>
+<coliver@mminternet.com>
+Collabora Ltd
+Collin Jackson <mozilla@collinjackson.com>
+conor@the325project.org
+Conrad Carlen <conrad@ingress.com>
+Constantine A. Murenin <cnst+moz#bugmail.mojo.ru>
+Craig Topper <craig.topper@gmail.com>
+crock@veilnetworks.com
+Crocodile Clips Ltd
+Cryptography Research
+CSIRO
+Curtis Bartley <cbartley@mozilla.com>
+Cyrus Omar
+Cyrus Patel <cyp@fb14.uni-mainz.de>
+Dafydd Jones
+Dainis Jonitis <Dainis_Jonitis@swh-t.lv>
+Daniel Aquino <mr.danielaquino@gmail.com>
+Daniel Bratell <bratell@lysator.liu.se>
+Daniel Brooks <db48x@db48x.net>
+Daniel Glazman <daniel.glazman@disruptive-innovations.com>
+Daniel Holbert <dholbert@mozilla.com>
+Daniel Kouril <kouril@ics.muni.cz>
+Daniel Kraft <d@domob.eu>
+Daniel Krieg
+Daniel Veditz <dveditz@cruzio.com>
+Daniel Witte <dwitte@stanford.edu>
+Dan Matejka <danm@netscape.com>
+Dan Mills <thunder@mozilla.com>
+Dan Mosedale <dmose@mozilla.org>
+Dan Rosen <dr@netscape.com>
+Dan Witte <dwitte@mozilla.com>
+Dão Gottwald <dao@design-noir.de>
+Darin Fisher <darin@meer.net>
+darren.deridder@icarusproject.com
+Dave Camp <dcamp@mozilla.com>
+Dave Herman <dherman@mozilla.com>
+davel@mozilla.com
+Dave Mandelin <dmandelin@mozilla.com>
+Dave Reed
+Dave Townsend <dtownsend@oxymoronical.com>
+David Anderson <danderson@mozilla.com>
+David Baron <dbaron@dbaron.org>
+David Bienvenu <bienvenu@nventure.com>
+David Bradley <bradley@netscape.com>
+David Burns <dburns@mozilla.com>
+David Courtin <dcourtin@systemplus.fr>
+David Dahl <ddahl@mozilla.com>
+David Dick <ddick@cpan.org>
+David Drinan <ddrinan@netscape.com>
+David Einstein <Deinst@world.std.com>
+Davide Prina
+David Finch
+David Gardiner <david.gardiner@unisa.edu.au>
+David Greenspan
+David Haas <haasd@cae.wisc.edu>
+David Hamp-Gonsalves
+David Humphrey <david.humphrey@senecac.on.ca>
+David Hyatt <hyatt@mozilla.org>
+David James
+David J. Fiddes <D.J.Fiddes@hw.ac.uk>
+David Andersson <liorean@gmail.com>
+<davidm@netscape.com>
+David P. Caldwell <inonit@inonit.com>
+David Rajchenbach-Teller <dteller@mozilla.com>
+David Savage
+David S. Miller <davem@redhat.com>
+David Woodhouse <dwmw2@infradead.org>
+David Zbarsky <dzbarsky@gmail.com>
+Dean Tessman <dean_tessman@hotmail.com>
+<deneen@alum.bucknell.edu>
+Denis Antrushin <adu@sparc.spb.su>
+Denis Issoupov <denis@macadamian.com>
+Dennis Handly
+Derrick Rice <derrick.rice@gmail.com>
+<desale@netscape.com>
+diablohn
+Diane Trout <diane@ghic.org>
+Dietrich Ayala <dietrich@mozilla.com>
+Digital Creations 2, Inc
+Disruptive Innovations
+<djoham@criadvantage.com>
+Don Bragg <dbragg@netscape.com>
+Don Brown
+Don Cone <dcone@netscape.com>
+Doodle <doodle@scenergy.dfmk.hu>
+Doron Rosenberg <doronr@naboonline.com>
+Dorus Peelen
+Douglas Stebila <douglas@stebila.ca>
+Doug Sherk <dsherk@mozilla.com>
+Doug Turner <dougt@dougt.org>
+Doug Wright
+<drbrain-bugzilla@segment7.net>
+Drew Willcoxon <adw@mozilla.com>
+<drimbk@yahoo.com>
+Dr Stephen Henson <stephen.henson@gemplus.com>
+<d-russo@ti.com>
+Dr Vipul Gupta <vipul.gupta@sun.com>
+Dvornik Laszlo
+Ed Burns <edburns@acm.org>
+Edward Lee <edilee@mozilla.com>
+Egor Starkov <starkov.egor@gmail.com>
+Ehsan Akhgari <ehsan.akhgari@gmail.com>
+Eitan Isaacson <eitan@monotonous.org>
+Eli Friedman <sharparrow1@yahoo.com>
+Elika J. Etemad <fantasai@inkedblade.net>
+Emanuele Costa <emanuele.costa@gmail.com>
+emk <VYV03354@nifty.ne.jp>
+Emmanuel Pacaud <emmanuel.pacaud@free.fr>
+<epstein@tellme.com>
+Ere Maijala <emaijala@kolumbus.fi>
+Eric Anholt <eric@anholt.net>
+Eric Butler <zantifon@gmail.com>
+Eric Hedekar <afterthebeep@gmail.com>
+Eric J. Burley <ericb@neoplanet.com>
+Eric Promislow
+Eric Vaughan <evaughan@netscape.com>
+Erik Fabert <jerfa@yahoo.com>
+Erik van der Poel
+Erik Vold <erikvvold@gmail.com>
+Ervin Yan <ervin.yan@sun.com>
+Erwan Loisant <eloisant@gmail.com>
+Esben Mose Hansen <esben@oek.dk>
+Ethan Hugg
+Eugeniy Meshcheryakov <eugen@debian.org>
+Evan Yan <evan.yan@sun.com>
+Fabrice Desré <fabrice.desre@gmail.com>
+Federico Mena-Quintero <federico@novell.com>
+Felipe Gomes <felipc@gmail.com>
+Felix Fung <felix.the.cheshire.cat@gmail.com>
+<felix.meschberger@day.com>
+Feng Qian
+Fernando Herrera <fherrera@onirica.com>
+Fernando Jimenez <ferjmoreno@gmail.com>
+Flock Inc.
+Florian Boesch <pyalot@gmail.com>
+Florian Hänel <heeen@gmx.de>
+Florian Queze <florian@queze.net>
+Florian Scholz <elchi3@elchi3.de>
+<flying@dom.natm.ru>
+France Telecom Research and Development
+Franck
+Frank Tang <ftang@netscape.com>
+Frank Yan <fyan@mozilla.com>
+Franky Braem
+<franky@pacificconnections.com>
+Franz Sirl <Franz.Sirl-kernel@lauterbach.com>
+Frederic Plourde <frederic.plourde@polymtl.ca>
+Frederic Wang <fred.wang@free.fr>
+Fredrik Holmqvist <thesuckiestemail@yahoo.se>
+Fredrik Larsson <nossralf@gmail.com>
+Fritz Schneider <fritz@google.com>
+<fur@netscape.com>
+Gagan Saksena <gagan@netscape.com>
+Ming Gao <gaoming@cn.ibm.com>
+Garrett Arch Blythe
+Garrett Smith
+Garth Smedley <garths@oeone.com>
+Gary Kwong
+Gavin Sharp <gavin@gavinsharp.com>
+Gefferth Andras
+Geoff Lankow <geoff@darktrojan.net>
+George Kangas
+<george@vanous.com>
+George Wright <george@mozilla.com>
+Georgi Guninski <guninski@guninski.com>
+Georg Maaß <georg@bioshop.de>
+Gervase Markham <gerv@gerv.net>
+Gian-Carlo Pascutto <gpascutto@mozilla.com>
+Gianluca Turconi
+Gianugo Rabellino <gianugo@apache.org>
+Gijs Kruitbosch <gijskruitbosch@gmail.com>
+Ginn Chen <ginn.chen@sun.com>
+Giorgio Maone <g.maone@informaction.com>
+Girish Sharma <scrapmachines@gmail.com>
+Girts
+Giscard Girard
+Giuseppe Modugno
+Glen Nakamura <glen@imodulo.com>
+Glenn Randers-Pehrson <glennrp@gmail.com>
+Goldman Eleonora
+Google Inc.
+Gordon Sheridan <gordon@netscape.com>
+Graeme McCutcheon <graememcc_firefox@graeme-online.co.uk>
+Graydon Hoare <graydon@mozilla.com>
+Gregory Szorc <gps@mozilla.com>
+Grig Gheorghiu <grig@gheorghiu.net>
+Guillermo Robla Vicario <groblavicario@gmail.com>
+Guoxin Fan <gfan@sta.samsung.com>
+Gus Verdun <gustavoverdun@aol.com>
+<gwatmuff@geographicweb.com.au>
+Haamed Gheibi <gheibi@metanetworking.com>
+Håkan Waara <hwaara@gmail.com>
+Halacsy Peter
+<hannes@helma.at>
+Hanno Boeck <hanno@hboeck.de>
+Hans-Andreas Engel
+Harri Pitkanen
+Harshal Pradhan <keeda@hotpop.com>
+Heather Arthur <fayearthur@gmail.com>
+Heikki Toivonen <heikki@netscape.com>
+Hein Roehrig
+Henrik Gemal <mozilla@gemal.dk>
+Henrik Skupin <hskupin@gmail.com>
+Henri Sivonen <hsivonen@iki.fi>
+Henry Cejtin
+Henry Sobotka <sobotka@axess.com>
+Heriot-Watt University
+Hermann Schwab <hhschwab@gmail.com>
+Hermecz Vajk
+Hernan Rodriguez Colmeiro <colmeiro@gmail.com>.
+Hewlett-Packard Company
+Hiroshi Shimoda <piro@p.club.ne.jp>
+Hoa Nguyen <hoa.nguyen@intel.com>
+Honza Bambas <honzab@firemni.cz>
+Howard Chu <hyc@symas.com>
+Hubbie Shaw
+Hubert Figuière <hub@mozilla.com>
+Ian Gilman <ian@iangilman.com>
+Ian Hickson <ian@hixie.ch>
+Ian McGreer <mcgreer@netscape.com>
+Ian Melven <imelven@mozilla.com>
+Ian Oeschger
+IBM Corporation
+i-DNS.net International
+<igor3@apochta.com>
+Igor Bazarny <igor.bazarny@gmail.com>
+Igor Bukanov <igor@mir2.org>
+igor@fastmail.fm
+igor@icesoft.no
+Igor Tandetnik
+Ilya Konstantinov <mozilla-code@future.shiny.co.il>
+Intel Corporation
+Jaakko Kiviluoto <jaakko.kiviluoto@digia.com>
+Jacek Piskozub <piskozub@iopan.gda.pl>
+Jacob Bramley <Jacob.Bramely@arm.com>
+Jae-Seong Lee-Russo <lusian@gmail.com>
+James Boston <mozilla@jamesboston.ca>
+James Bunton <jamesbunton@fastmail.fm>
+James Justin Harrell
+James L. Nance
+James Ross <silver@warwickcompsoc.co.uk>
+James Willcox <jwillcox@mozilla.com>
+Jamie Zawinski <jwz@jwz.org>
+Jan Bambas <honzab@firemni.cz>
+Jan Darmochwal
+Jan de Mooij <jandemooij@gmail.com>
+Jan Horak <jhorak@redhat.com>
+Jan-Klaas Kollhof
+Jan Odvarko <odvarko@gmail.com>
+Jan Varga <Jan.Varga@gmail.com>
+Jan Wrobel <wrobel@blues.ath.cx>
+Jared Wein <jwein@mozilla.com>
+Jason Barnabe <jason_barnabe@fastmail.fm>
+Jason Duell <jduell.mcbugs@gmail.com>
+Jason Eager <jce2@po.cwru.edu>
+Jason Sachs <jmsachs@gmail.com>
+Jason Kersey <kerz@netscape.com>
+Jason Orendorff <jorendorff@mozilla.com>
+Jason Voll <jvoll@mozilla.com>
+Javier Delgadillo <javi@netscape.com>
+Javier Pedemonte <pedemont@us.ibm.com>
+Jay Patel <jay@mozilla.org>
+Jean-Francois Ducarroz <ducarroz@netscaape.com>
+Jean-Jacques Enser <jj@netscape.com>
+Jeff Gilbert <jgilbert@mozilla.com>
+Jeff Hammel <jhammel@mozilla.com>
+Jeff Muizelaar <jmuizelaar@mozilla.com>
+Jeff Thompson
+Jeff Walden <jwalden+bmo@mit.edu>
+Jens Bannmann <jens.b@web.de>
+Jens Hatlak <jh@junetz.de>
+Jeremias Bosch <jeremias.bosch@gmail.com>
+Jeremy D. Lea <reg@openpave.org>
+Jeroen Dobbelaere <jeroen.dobbelaere@acunia.com>
+<Jerry.Kirk@Nexwarecorp.com>
+Jesper Kristensen <mail@jesperkristensen.dk>
+Jesse Ruderman <jruderman@gmail.com>
+Jessica Blanco <jblanco@us.ibm.com>
+<jhs@lysator.liu.se>
+<ji_bo@yahoo.com>
+Jignesh Kakadiya <jigneshhk1992@gmail.com>
+Jim Blandy <jimb@mozilla.com>
+Jim Chen <jchen@mozilla.com>
+Jim Grandy
+Jim Ley <jim@jibbering.com>
+Jim Mathies <jmathies@mozilla.com>
+Jim Nance <jim_nance@yahoo.com>
+<jim-patterson@ncf.ca>
+Jim Roskind <jar@netscape.com>
+<jlaprise@delanotech.com>
+Joachim Kuebart
+Joaquin Cuenca Abela
+Joe Drew <joe@drew.ca>
+Joe Hewitt <hewitt@netscape.com>
+Joe Hughes <joe@retrovirus.com>
+Joel Maher <joel.maher@gmail.com>
+joerg.schaible@gmx.de
+Joe Walker <jwalker@mozilla.com>
+Joey Armstrong <joey@mozilla.com>
+Joey Minta <jminta@gmail.com>
+Johan Charlez <johan.charlez@gmail.com>
+Johann Petrak <johann@ai.univie.ac.at>
+Johnathan Nightingale <johnath@mozilla.com>
+John Bandhauer <jband@netscape.com>
+John B. Keiser
+John C. Griggs <johng@corel.com>
+John Daggett <jdaggett@mozilla.com>
+John Fairhurst <john_fairhurst@iname.com>
+John Ford <jhford@mozilla.com>
+John Gardiner Myers <jgmyers@speakeasy.net>
+John Gaunt <jgaunt@netscape.com>
+John Hanely
+John Morkel <jmorkel@gmail.com>
+John Morrison <jrgmorrison@aol.com>
+Johnny Stenback <jst@mozilla.com>
+John Resig <jresig@mozilla.com>
+John Schneider
+John Sun <john.sun@sun.com>
+John Taylor <jtaylor@netscape.com>
+John Zhang <jzhang@aptana.com>
+Jonas Jonsson <mozilla.nospam@fatbrain.org>
+Jonas Sicking <jonas@sicking.cc>
+Jonathan Granrose <granrose@netscape.com>
+Jonathan Griffin <jgriffin@mozilla.com>
+Jonathan Hage <hage.jonathan@gmail.com>
+Jonathan Kew <jfkthame@gmail.com>
+Jonathan Protzenko <jonathan.protzenko@gmail.com>
+Jonathan Watt <jwatt@jwatt.org>
+Jonathan Wilson <jonwil@tpgi.com.au>
+Jonathon Jongsma <jonathon.jongsma@collabora.co.uk>
+Jon Herron <leftturnsolutions@yahoo.com>
+Jono DiCarlo <jdicarlo@mozilla.com>
+Joonas Pihlaja <jpihlaja@cc.helsinki.fi>
+Jorge Villalobos <jorge@mozilla.com>
+Jory A. Pratt <geekypenguin@gmail.com>
+Josh Aas <josh@mozilla.com>
+Josh Lurz <jlurz24@gmail.com>
+Josh Matthews <josh@joshmatthews.net>
+Joshua M. <soapyhamhocks@gmail.com>
+Joshua Randall <jcrandall@alum.mit.edu>
+J. Paul Reed <preed@mozilla.com>
+Juan Lang
+Julian Seward <jseward@acm.org>
+Julian Viereck <jviereck@mozilla.com>
+Julien Lafon <julien.lafon@gmail.com>
+Julien Lecomte
+Jungshik Shin <jshin@mailaps.org>
+Justin Arthur <justinarthur@ieee.org>
+Justin Bradford <jab@atdot.org>
+Justin Dolske <dolske@mozilla.com>
+Justin Lebar <justin.lebar@gmail.com>
+Kai Engert <kaie@redhat.com>
+Kailas Patil <patilkr24@gmail.com>
+Kan-Ru Chen <kchen@mozilla.com>
+Karl Tomlinson <karlt+@karlt.net>
+Karsten Sperling <spiff@phreax.net>
+Kartikaya Gupta <kgupta@mozilla.com>
+Kaspar Brand <mozcontrib@velox.ch>
+Kathleen Brade <brade@pearlcrescent.com>
+Katsuhiko Momoi
+Keith Packard <keithp@keithp.com>
+Keith Rarick <kr@xph.us>
+Keith Schwarz <kschwarz@mozilla.com>
+Keith Visco <kvisco@ziplink.net>
+Ken Key <key+mozilla@ksquared.net>
+Kenneth Herron <kherron@fmailbox.com>
+Kenny Heaton <kennyheaton@gmail.com>
+Kevin Gerich <kevin@kmgerich.com>
+Kevin Hendricks <kevin.hendricks@sympatico.ca>
+Kevin McCluskey <kmcclusk@netscape.com>
+Kevin Puetz <puetzk@iastate.edu>
+<khanson@netscape.com>
+Kin Blas <kin@netscape.com>
+Kipp E.B. Hickman
+Kishore Arepalli <kishore.arepalli@gmail.com>
+<k.mike@gmx.net>
+Konstantin Mirny
+Korea Information Security Agency
+Kris Maglione <maglione.k@gmail.com>
+Kristian Høgsberg <krh@redhat.com>
+Kurt Lidl <lidl@pix.net>
+Kyle Huey <khuey@kylehuey.com>
+Kyle Machulis <kyle@nonpolynomial.com>
+Kyle Simpson <ksimpson@mozilla.com>
+Kyle Yuan <kyle.yuan@sun.com>
+Landry Breuil <landry@openbsd.org>
+Lan Qiang <jameslan@gmail.com>
+Larry Fitzpatrick <lef@opentext.com>
+Lars Erdmann
+Lars Knoll <knoll@kde.org>
+LastPass.com
+László Jánszky
+László Németh <nemethl@gyorsposta.hu>
+Laurent Jouanneau <laurent.jouanneau@disruptive-innovations.com>
+Lenka Fibikova <fibikova@exp-math.uni-essen.de>
+Leon Sha <leon.sha@oracle.com>
+Lev Serebryakov <lev@serebryakov.spb.ru>
+Le Zhang <r0bertz@gentoo.org>
+Liam Davis-Mead
+Lidong <lidong520@263.net>
+Lina Kemmel <lkemmel@il.ibm.com>
+Louie Zhao <louie.zhao@sun.com>
+Lubos Ures
+Lucas Rocha <lucasr@mozilla.com>
+Luke Wagner <lw@mozilla.com>
+<Lupin.wp@gmail.com>
+Maha Abou El Rous <mahar@eg.ibm.com>
+Makoto Kato <m_kato@ga2.so-net.ne.jp>
+Malcolm Rowe <malcolm-bmo@farside.org.uk>
+Malcolm Smith <malsmith@cs.rmit.edu.au>
+Malini Das <mdas@mozilla.com>
+Manish Singh <manish@flock.com>
+Marc Attinasi
+Marc Bevand <bevand_m@epita.fr>
+Marcin Lubonski
+Marc Mulcahy <marc.mulcahy@sun.com>
+Marco Bonardo <mak77@bonardo.net>
+Marco Castelluccio <mar.castelluccio@studenti.unina.it>
+Marco Fabbri
+Marco Pesenti Gritti <marco@gnome.org>
+Margaret Leibovic <margaret.leibovic@gmail.com>
+Marina Samuel <msamuel@mozilla.com>
+Mark Banner <bugzilla@standard8.plus.com>
+Mark Capella <markcapella@twcny.rr.com>
+Mark Cote <mcote@mozilla.com>
+Mark Finkle <mark.finkle@gmail.com>
+Mark Hammond <markh@activestate.com>
+Mark Mentovai <mark@moxienet.com>
+Mark Pilgrim <pilgrim@gmail.com>
+Mark Smith <mcs@pearlcrescent.com>
+Mark Steele <mwsteele@gmail.com>
+Mark Straver <moonchild@palemoon.org>
+Markus G. Kuhn <mkuhn@acm.org>
+Markus Stange <mstange@themasta.com>
+Martijn Pieters <mj@digicool.com>
+Martijn Wargers <martijn.martijn@gmail.com>
+Martin Hassman <hassman@czilla.cz>
+Martin Honnen <martin.honnen@gmx.de>
+Martin McNickle <mmcnickle@gmail.com>
+Martin Schroeder <mschroeder@mozilla.x-home.org>
+Martin Stransky <stransky@redhat.com>
+Martin v. Loewis <martin@v.loewis.de>
+Martin Zvieger <martin.zvieger@sphinx.at>
+Masakazu Takahashi
+Masaki Katakai <katakai@japan.sun.com>
+Masatoshi Kimura <VYV03354@nifty.ne.jp>
+Masayuki Nakano <masayuki@d-toybox.com>
+Mathias Hasselmann <mathias.hasselmann@gmx.de>
+Mathieu Fenniak
+Mats Palmgren <matspal@gmail.com>
+Matt Brubeck <mbrubeck@mozilla.com>
+Matt Crocker <matt@songbirdnest.com>
+Matt Fisher <matt@netscape.com>
+Matthew Gregan <kinetik@flim.org>
+Matthew Noorenberghe <mnoorenberghe@mozilla.com>
+Matt Woodrow <mwoodrow@mozilla.com>
+Max Stepin <maxstepin@gmail.com>
+<mcmullen@netscape.com>
+Meena Vyas <meena.vyas@oracle.com>
+Mehdi Mulani <mmulani@mozilla.com>
+Merle Sterling <msterlin@us.ibm.com>
+<mff@research.att.com>
+Michael Ash <kal_el_1938@hotmail.com>
+Michael Daumling <daumling@adobe.com>
+Michael Emmel <mike.emmel@gmail.com>
+Michael Hanson <mhanson@mozilla.com>
+Michael J. Fromberger <sting@linguist.dartmouth.edu>
+Michael Johnston <special.michael@gmail.com>
+Michael Judge <mjudge@netscape.com>
+Michael Kohler <michaelkohler@live.com>
+Michael Kraft <morac99-firefox2@yahoo.com>
+Michael Lipp
+Michael Lowe <michael.lowe@bigfoot.com>
+Michael Martin
+michaelp
+Michael Ratcliffe <mratcliffe@mozilla.com>
+Michael Roovers
+Michael Ventnor <m.ventnor@gmail.com>
+Michael Wu <flamingice@sourmilk.net>
+Michael Yoshitaka Erlewine <mitcho@mitcho.com>
+Michal Novotny <michal.novotny@gmail.com>
+Michiel van Leeuwen <mvl@exedo.nl>
+Mihai Șucan <mihai.sucan@gmail.com>
+Miika Jarvinen <mjarvin@gmail.com>
+Mikeal Rogers <mikeal.rogers@gmail.com>
+Mike Beltzner <beltzner@mozilla.com>
+Mike Connor <mconnor@steelgryphon.com>
+Mike Hommey <mh@glandium.org>
+Mike Kaplinskiy <mike.kaplinskiy@gmail.com>
+Mike Kaply
+Mike Kowalski <mikejk@ameritech.net>
+Mike Kristoffersen <mikek@mikek.dk>
+Mike McCabe <mccabe@netscape.com>
+Mike Pinkerton <pinkerton@netscape.com>
+Mike Shaver <shaver@off.net>
+Milen Nankov
+Milind <sukhisoul@yahoo.com>
+Mitchell Field <mitch_1_2@live.com.au>
+Mitchell Stoltz <mstoltz@netscape.com>
+Mitesh Shah <mitesh@netscape.com>
+M Joonas Pihlaja <jpihlaja@cc.helsinki.fi>
+Mohammad R. Haghighat <mohammad.r.haghighat@intel.com>
+Mook <mook.moz@gmail.com>
+Morten Nilsen <morten@nilsen.com>
+Mounir Lamouri <mounir.lamouri@mozilla.com>
+moz_bug_r_a4
+Mozdev Group, Inc
+Mozilla Foundation
+Mozilla Japan
+<mozilla@pdavis.cx>
+Mrinal Kant <mrinal.kant@gmail.com>
+Ms2ger <ms2ger@gmail.com>
+Murali S R <murali.sr92@yahoo.com>
+Muzaffar Mahkamov <mmahkamov@eisst.com>
+Myk Melez <myk@mozilla.org>
+<myngs@hotmail.com>
+Nagendra Modadugu <ngm@google.com>
+Nagy Viktor
+Naoki Hotta <nhotta@netscape.com>
+Nao Toyamo
+Nate Nielsen <nielsen@memberwebs.com>
+Nattachai Ungsriwong <nattachai@gmail.com>
+Neil Deakin <enndeakin@gmail.com>
+Neil Rashbrook <neil@parkwaycc.co.uk>
+Nelson Bolyard <nelson@bolyard.me>
+Neo Liu <nian.liu@sun.com>
+Netscape Communications Corporation
+Nicholas Nethercote <nnethercote@mozilla.com>
+Nick Fitzgerald <nfitzgerald@mozilla.com>
+Nickolay Ponomarev <asqueella@gmail.com>
+Niels Provos <niels@google.com>
+Nikhil Marathe <nsm.nikhil@gmail.com>
+Nils Gura <nils.gura@sun.com>
+Nils Larsch <nla@trustcenter.de>
+Nils Maier <maierman@web.de>
+Ningjie Chen <chenn@email.uc.edu>
+Nino D'Aversa <ninodaversa@gmail.com>
+Nippon Telegraph and Telephone Corporation
+Nochum Sossonko <highmind63@gmail.com>
+Nokia Corporation
+Noll Janos
+Norris Boyd <nboyd@atg.com>
+Novell Corporation
+NVIDIA Corporation
+OEone Corporation
+<okin7@yahoo.fr>
+Oleg Romashin <romaxa@gmail.com>
+Oliver Hunt
+Olivier Cahagne
+Olivier Gerardin <ogerardin@vo.lu>
+Olli Pettay <Olli.Pettay@helsinki.fi>
+Ondrej Brablc <ondrej@allpeers.com>
+Oracle Corporation
+Oscar Fogelberg <osfo@home.se>
+Owen Taylor <otaylor@redhat.com>
+Øyvind Kolås <pippin@gimp.org>
+Pamela Greene <pamg.bugs@gmail.com>
+Panagiotis Astithas <past@mozilla.com>
+Paolo Amadini <http://www.amadzone.org/>
+Patipat Susumpow <kengggg@gmail.com>
+Patrick Beard <beard@netscape.com>
+Patrick Fey <bugzilla@nachtarbeiter.net>
+Patrick McManus <mcmanus@ducksong.com>
+Patrick Walton <pwalton@mozilla.com>
+Pattara Kiatisevi <ott@linux.thai.net>
+Paul Ashford <arougthopher@lizardland.net>
+Paul Biggar <pbiggar@mozilla.com>
+Paul Kocher
+Paul Kurczaba
+Paul O'Shannessy <paul@oshannessy.com>
+Paul Rouget <paul@mozilla.com>
+Paul Sandoz <paul.sandoz@sun.com>
+Pavel Cvrcek
+Pawel Chmielowski
+PenPal
+Pete Collins <petejc@collab.net>
+Peter Annema <disttsc@bart.nl>
+Peter Bajusz <hyp-x@inf.bme.hu>
+Peter Lubczynski <peterl@netscape.com>
+Peter Naulls
+Peter Parente <parente@cs.unc.edu>
+Peter Seliger
+Peter Van der Beken <peter@propagandism.org>
+Peter van der Woude
+Peter Weilbacher <mozilla@weilbacher.org>
+Pete Zha <pete.zha@sun.com>
+Petr Kostka <petr.kostka@st.com>
+Philipp Kewisch <mozilla@kewis.ch>
+Philipp Vogt <vogge@vlbg.dhs.org>
+Philipp von Weitershausen <philipp@weitershausen.de>
+Philip Taylor <philip.taylor@cl.cam.ac.uk>
+Phil Ringnalda
+Phil Schwartau <pschwartau@meer.net>
+Pierre Chanial <chanial@noos.fr>
+Pierre Phaneuf <pp@ludusdesign.com>
+Pierre Tardy <tardyp@gmail.com>
+POTI Inc
+Prabhat Hegde <prabhat.hegde@sun.com>
+Pranav Ravichandran <prp.1111@gmail.com>
+Prasad Sunkari <prasad@medhas.org>
+Priit Laes
+Proofpoint, Inc.
+Q42 <http://www.q42.nl>
+Radha Kulkarni <radha@netscape.com>
+Ramanathan Guha <guha@netscape.com>
+Ramiro Estrugo <ramiro@netscape.com>
+Randell Jesup
+Randolph Chung <tausq@debian.org>
+Rangan Sen <rangansen@netscape.com>
+Raúl Porcel <armin76@gentoo.org>
+Raymond Lee <raymond@appcoast.com>
+Red Hat, Inc
+Rene Engelhard
+Rene Pronk <r.pronk@its.tudelft.nl>
+Reto Laemmler
+<rhp@netscape.com>
+Ria Klaassen
+Richard C. Swift <swift@netscape.com>
+Richard L. Walsh <dragtext@e-vertise.com>
+Richard Newman <rnewman@mozilla.com>
+Richard Verhoeven <river@win.tue.nl>
+Richard Walsh
+Rich Dougherty
+Rich Salz
+Rich Walsh <dragtext@e-vertise.com>
+Rick Gessner <rickg@netscape.com>
+R.J. Keller <rlk@trfenv.com>
+Rob Arnold <tellrob@gmail.com>
+Rob Campbell <rcampbell@mozilla.com>
+Robert Accettura <robert@accettura.com>
+Robert Churchill <rjc@netscape.com>
+Robert Ginda <rginda@hacksrus.com>
+Robert Kaiser <kairo@kairo.at>
+Robert Longson <longsonr@gmail.com>
+Robert Miner <rminer@geomtech.com>
+Robert O'Callahan <robert@ocallahan.org>
+Roberto Estrada <roberto.estrada@yahoo.es>
+Robert Relyea <rrelyea@redhat.com>
+Robert Sayre <sayrer@gmail.com>
+Robert Sesek <rsesek@bluestatic.org>
+Robert Strong <robert.bugzilla@gmail.com>
+Robin Lu <robin.lu@sun.com>
+Rob McCool
+Rod Spears <rods@netscape.com>
+Roger B. Sidje <rbs@maths.uq.edu.au>
+<rogerl@netscape.com>
+<rokicki@instantis.com>
+Roland Mainz <roland.mainz@informatik.med.uni-giessen.de>
+Roman Ivanov <thingol@mail.ru>
+Ronny Perinke <ronny.perinke@gmx.de>
+Roozbeh Pournader <roozbeh@sharif.edu>
+Roy Frostig <rfrostig@mozilla.com>
+Roy Yokoyama <yokoyama@netscape.com>
+RSA Security, Inc
+Russell King <rmk@arm.linux.org.uk>
+Rusty Lynch <rusty.lynch@intel.com>
+Ryan Cassin <rcassin@supernova.org>
+Ryan Flint <rflint@dslr.net>
+Ryan Jones <sciguyryan@gmail.com>
+Ryan VanderMeulen <ryanvm@gmail.com>
+Ryoichi Furukawa <oliver@1000cp.com>
+sagdjb@softwareag.com
+Samir Gehani <sgehani@netscape.com>
+Sammy Ford
+Samphan Raruenrom
+Samuel Sieb <samuel@sieb.net>
+Sarlos Tamas
+scole@planetweb.com
+Scooter Morris <scootermorris@comcast.net>
+Scott Collins <scc@ScottCollins.net>
+Scott MacGregor <mscott@mozilla.org>
+Scott Putterman <putterman@netscape.com>
+Sean Cotter <cotter@netscape.com>
+Sean Dunn <seanedunn@yahoo.com>
+Sean Echevarria <sean@beatnik.com>
+Sean McMurray
+Sean Stangl <sstangl@mozilla.com>
+Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Sebastian Kromp <46b@gulli.com>
+Seno Aiko <Seno.Aiko@gmail.com>
+Serge Gautherie <sgautherie.bz@free.fr>
+Sergei Dolgov <sergei_d@fi.tartu.ee>
+Sergey Novikov <sergeyn@google.com>
+Seth Spitzer <sspitzer@mozilla.org>
+sfraser@netscape.com
+shadowpage
+Shailendra N Jain <shailen.n.jain@gmail.com>
+Shaohua Wen
+Shawn Gong <shawn@mozilla.com>
+Shawn Wilsher <me@shawnwilsher.com>
+Sheueling Chang Shantz <sheueling.chang@sun.com>,
+Shi-Jun He
+Shoji Matsumoto <shom@vinelinux.org>
+Shoshannah Forbes <xslf@xslf.com>
+shutdown@flashmail.com
+Shyjan Mahamud <mahamud@cs.cmu.edu>
+Shy Shalom <shooshX@gmail.com>
+Siarhei Siamashka <siarhei.siamashka@gmail.com>
+Siddharth Agarwal <sid.bugzilla@gmail.com>
+Sid Stamm <sid@mozilla.com>
+Silvia Zhao <silvia.zhao@sun.com>
+Silviu Trasca
+Simeon Morrison <smorrison@gte.com>
+Simmule Turner
+Simon Brouwer
+Simon Bünzli <zeniko@gmail.com>
+Simon Fraser <smfr@smfr.org>
+Simon Montagu <smontagu@smontagu.org>
+Simon Wilkinson <simon@sxw.org.uk>
+simonzack
+Sindre Dammann <sindrebugzilla@gmail.com>
+Sinker Li <thinker@codemud.net>
+Sion Fraser <sfraser@netscape.com>
+Siraj Razick <siraj.razick@collabora.co.uk>
+Sjoerd Visscher <sjoerd@w3future.com>
+Slavomir Katuscak <slavomir.katuscak@sun.com>
+smorrison@gte.com
+Soeren Munk Skroeder
+Sonja Mirtitsch
+Sonny Piers <sonny.piers@gmail.com>
+Søren Sandmann <sandmann@daimi.au.dk>
+Spyros Livathinos <livathinos.spyros@gmail.com>
+Sriram Ramasubramanian <sriram@mozilla.com>
+Srirang G Doddihal <brahmana@doddihal.com>
+Stanford University
+Stan Shebs <stanshebs@earthlink.net>
+Stefan Borggraefe
+Stefan Hundhammer <HuHa-zilla@gmx.de>
+Stefanik Gábor
+Stefan Sitter <ssitter@gmail.com>
+Steffen Imhof <steffen.imhof@gmail.com>
+Steffen Wilberg <steffen.wilberg@web.de>
+Stephen Blackheath <entangled.mooched.stephen@blacksapphire.com>
+Stephen Donner
+Stephen Fung <fungstep@hotmail.com>
+Stephen Horlander <shorlander@mozilla.com>
+Stephen Lamm <slamm@netscape.com>
+Steve Chapel <steven.chapel@sbcglobal.net>
+Steve Clark <buster@netscape.com>
+Steve Dagley <sdagley@netscape.com>
+Steve Fink <sfink@mozilla.org>
+Steve Lamm <slamm@netscape.com>
+Steve Meredith <smeredith@netscape.com>
+Steve Morse <morse@netscape.com>
+Steven Garrity <steven@silverorange.com>
+Steven Michaud <smichaud@pobox.com>
+Steve Roussey
+Steve Swanson <steve.swanson@mackichan.com>
+Stuart Morgan <stuart.morgan@alumni.case.edu>
+Stuart Parmenter <pavlov@pavlov.net>
+Sungjoon Steve Won <stevewon@gmail.com>
+Sun Microsystems, Inc.
+Suresh Duddi <dp@netscape.com>
+<svendtofte@svendtofte.com>
+Sylvain Pasche <sylvain.pasche@gmail.com>
+Takayuki Tei <taka@netscape.com>
+Takeshi Ichimaru <ayakawa.m@gmail.com>
+Takuro Ashie <ashie@clear-code.com>
+Tanner M. Young <mozilla@alyoung.com>
+Taras Glek <tglek@mozilla.com>
+Tatiana Meshkova <tanya.meshkova@gmail.com>
+Ted Mielczarek <ted@mielczarek.org>
+Tero Koskinen <tero.koskinen@iki.fi>
+Terrence Cole <terrence@mozilla.com>
+Terry Hayes <thayes@netscape.com>
+Teune van Steeg <t.vansteeg@gmail.com>
+The Hewlett-Packard Company
+The MITRE Corporation
+The Nokia Corporation
+Theppitak Karoonboonyanan <thep@linux.thai.net>
+The University of Queensland
+Thiemo Seufer <seufer@csv.ica.uni-stuttgart.de>
+Thomas Blatter <bebabo@swissonline.ch>
+Thomas de Grenier de Latour <tom.gl@free.fr>
+Thomas K. Dyas <tom.dyas@gmail.com>
+Tim Copperfield <timecop@network.email.ne.jp>
+timeless <timeless@mac.com>
+Tim Hill <tim@prismelite.com>
+Tim Miller <mille449@gmail.com>
+Timothy B. Terriberry <tterriberry@mozilla.com>
+Timothy Nikkel <tnikkel@gmail.com>
+Timothy Wall <twalljava@dev.java.net>
+Timothy Watt <riceman+moz@mail.rit.edu>
+Tim Rowley <tor@cs.brown.edu>
+Tim Taubert <tim.taubert@gmx.de>
+Tom Brinkman <reportbase@gmail.com>
+Tom Germeau <tom.germeau@epigoon.com>
+Tomi Leppikangas <tomi.leppikangas@oulu.fi>
+Tom Kneeland <tomk@mitre.org>
+Tom Pixley <joki@netscape.com>
+Tom Schuster <evilpies@gmail.com>
+Tom St Denis <tomstdenis@iahu.ca>
+Tom Tromey
+Tony Chang <tony@ponderer.org>
+Tor Lillqvist
+Travis Bogard <travis@netscape.com>
+Trent Mick <TrentM@ActiveState.com>
+Trevor Fairey <tnfairey@gmail.com>
+Trevor Saunders <trev.saunders@gmail.com>
+Troy Farrell <troy@entheossoft.com>
+Tuukka Tolvanen <tt@lament.cjb.net>
+T. Zachary Laine <whatwasthataddress@gmail.com>
+Ulrich Drepper <drepper@redhat.com>
+University of Southern California
+Uri Bernstein <uriber@gmail.com>
+Varga Daniel
+Vee Satayamas <vsatayamas@gmail.com>
+Victor Porof <vporof@mozilla.com>
+Vidur Apparao <vidur@netscape.com>
+Vilya Harvey <vilya@nag.co.uk>
+Vincent Béron <vberon@hermes.usherb.ca>
+Vipul Gupta <vipul.gupta@sun.com>
+Viswanath Ramachandran <vishy@netscape.com>
+Vivien Nicolas <21@vingtetun.org>
+Vladan Djeric <vdjeric@mozilla.com>
+Vladimir Vukicevic <vladimir@pobox.com>
+Vlad Sukhoy <vladimir.sukhoy@gmail.com>
+WADA <m-wada@japan.com>
+Waldemar Horwat <waldemar@acm.org>
+Walter Meinl <wuno@lsvw.de>
+Warren Harris <warren@netscape.com>
+Wellington Fernando de Macedo <wfernandom2004@gmail.com>
+Werner Sharp
+Wesley Garland
+Wesley Johnston <wjohnston@mozilla.com>
+Will Guaraldi <will.guaraldi@pculture.org>
+William A. Law <law@netscape.com>
+William B. Ackerman
+William Chen <wchen@mozilla.com>
+William Cook <william.cook@crocodile-clips.com>
+William Jon McCann <william.jon.mccann@gmail.com>
+William Lachance <wlachance@mozilla.com>
+William Price <bugzilla@mob.rice.edu>
+William R. Price <wrprice@alumni.rice.edu>
+<william.tan@i-dns.net>
+Wladimir Palant <trev@gtchat.de>
+Wolfgang Rosenauer <wr@rosenauer.org>
+<wtam@bigfoot.com>
+Wyllys Ingersoll <wyllys.ingersoll@sun.com>
+<x00000000@freenet.de>
+Yueheng Xu <yueheng.xu@intel.com>
+Yuh-Ruey Chen
+Yury <async.processingjs@yahoo.com>
+Yury Delendik
+Zachary Weinberg <zweinberg@mozilla.com>
+Zach Linder <zakness@gmail.com>
+Zach Lipton <zach@zachlipton.com>
+Zack Rusin <zack@kde.org>
+<zack-weg@gmx.de>
+Zack Weinberg <zweinberg@mozilla.com>
+Zbigniew Braniecki <gandalf@e-gandalf.net>
+<zen-parse@gmx.net>
+Zero-Knowledge Systems, Inc
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..c180917
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,14 @@
+Please see the file toolkit/content/license.html for the copyright licensing
+conditions attached to this codebase, including copies of the licenses
+concerned.
+
+For more information about source code licensing, see:
+http://www.palemoon.org/licensing.shtml
+
+You are not granted rights or licenses to the intellectual property or trademarks
+of the Mozilla Foundation, Moonchild Productions, or any other party, including
+without limitation the Pale Moon name or logo.
+
+Binary versions of Pale Moon are subject to the Pale Moon redistribution license.
+For more information, see: http://www.palemoon.org/redist.shtml
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..cc755b1
--- /dev/null
+++ b/README.md
@@ -0,0 +1,16 @@
+# Pale Moon web browser
+
+This is the source code for the Pale Moon web browser, an independent browser derived from Firefox/Mozilla community code. The source tree is
+laid out in a "comm-central" style configuration where only the code specific to Pale Moon is kept in this repository.
+
+The shared Unified XUL Platform source code is referenced here as a git submodule contained in the `platform/` directory and is required to build the application.
+
+## Getting the platform sub-module
+`git submodule init && git submodule update`
+
+## Resources
+
+ * [Build Pale Moon for Windows](https://forum.palemoon.org/viewtopic.php?f=19&t=13556)
+ * [Build Pale Moon for Linux](https://developer.palemoon.org/Developer_Guide:Build_Instructions/Pale_Moon/Linux)
+ * [Pale Moon home page](http://www.palemoon.org/)
+ * [Code of Conduct, Contributing, and UXP Coding style](https://github.com/MoonchildProductions/UXP/tree/master/docs)
diff --git a/aclocal.m4 b/aclocal.m4
new file mode 100644
index 0000000..ae39be3
--- /dev/null
+++ b/aclocal.m4
@@ -0,0 +1,27 @@
+dnl
+dnl Local autoconf macros used with UXP
+dnl The contents of this file are under the Public Domain.
+dnl
+
+builtin(include, platform/build/autoconf/toolchain.m4)dnl
+builtin(include, platform/build/autoconf/config.status.m4)dnl
+builtin(include, platform/build/autoconf/nspr.m4)dnl
+builtin(include, platform/build/autoconf/nss.m4)dnl
+builtin(include, platform/build/autoconf/pkg.m4)dnl
+builtin(include, platform/build/autoconf/codeset.m4)dnl
+builtin(include, platform/build/autoconf/altoptions.m4)dnl
+builtin(include, platform/build/autoconf/mozprog.m4)dnl
+builtin(include, platform/build/autoconf/acwinpaths.m4)dnl
+builtin(include, platform/build/autoconf/lto.m4)dnl
+builtin(include, platform/build/autoconf/frameptr.m4)dnl
+builtin(include, platform/build/autoconf/compiler-opts.m4)dnl
+builtin(include, platform/build/autoconf/zlib.m4)dnl
+builtin(include, platform/build/autoconf/expandlibs.m4)dnl
+
+MOZ_PROG_CHECKMSYS()
+
+# Read the user's .mozconfig script. We can't do this in
+# configure.in: autoconf puts the argument parsing code above anything
+# expanded from configure.in, and we need to get the configure options
+# from .mozconfig in place before that argument parsing code.
+dnl MOZ_READ_MOZCONFIG(platform)
diff --git a/build/autoconf/config.guess b/build/autoconf/config.guess
new file mode 100644
index 0000000..d5d667d
--- /dev/null
+++ b/build/autoconf/config.guess
@@ -0,0 +1,1454 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright 1992-2016 Free Software Foundation, Inc.
+
+timestamp='2016-03-24'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
+#
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+#
+# Please send patches to <config-patches@gnu.org>.
+
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2016 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+trap 'exit 1' 1 2 15
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+set_cc_for_build='
+trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
+trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
+: ${TMPDIR=/tmp} ;
+ { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
+dummy=$tmp/dummy ;
+tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
+case $CC_FOR_BUILD,$HOST_CC,$CC in
+ ,,) echo "int x;" > $dummy.c ;
+ for c in cc gcc c89 c99 ; do
+ if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$c"; break ;
+ fi ;
+ done ;
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found ;
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+esac ; set_cc_for_build= ;'
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
+UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
+UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
+UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
+
+case "${UNAME_SYSTEM}" in
+Linux|GNU|GNU/*)
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ LIBC=gnu
+
+ eval $set_cc_for_build
+ cat <<-EOF > $dummy.c
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #else
+ LIBC=gnu
+ #endif
+ EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`
+ ;;
+esac
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \
+ /sbin/$sysctl 2>/dev/null || \
+ /usr/sbin/$sysctl 2>/dev/null || \
+ echo unknown)`
+ case "${UNAME_MACHINE_ARCH}" in
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ earmv*)
+ arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'`
+ endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'`
+ machine=${arch}${endian}-unknown
+ ;;
+ *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently, or will in the future.
+ case "${UNAME_MACHINE_ARCH}" in
+ arm*|earm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ eval $set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # Determine ABI tags.
+ case "${UNAME_MACHINE_ARCH}" in
+ earm*)
+ expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+ abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"`
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "${UNAME_VERSION}" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2`
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "${machine}-${os}${release}${abi}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE}
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
+ exit ;;
+ *:LibertyBSD:*:*)
+ UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'`
+ echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE}
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
+ exit ;;
+ *:SolidBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:MirBSD:*:*)
+ echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
+ exit ;;
+ *:Sortix:*:*)
+ echo ${UNAME_MACHINE}-unknown-sortix
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
+ ;;
+ *5.*)
+ UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE=alpha ;;
+ "EV5 (21164)")
+ UNAME_MACHINE=alphaev5 ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE=alphaev56 ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE=alphapca56 ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE=alphapca57 ;;
+ "EV6 (21264)")
+ UNAME_MACHINE=alphaev6 ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE=alphaev67 ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE=alphaev69 ;;
+ "EV7 (21364)")
+ UNAME_MACHINE=alphaev7 ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE=alphaev79 ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
+ Alpha\ *:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # Should we change UNAME_MACHINE based on the output of uname instead
+ # of the specific Alpha model?
+ echo alpha-pc-interix
+ exit ;;
+ 21064:Windows_NT:50:3)
+ echo alpha-dec-winnt3.5
+ exit ;;
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo ${UNAME_MACHINE}-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix${UNAME_RELEASE}
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "`(/bin/universe) 2>/dev/null`" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case `/usr/bin/uname -p` in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux${UNAME_RELEASE}
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ eval $set_cc_for_build
+ SUN_ARCH=i386
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH=x86_64
+ fi
+ fi
+ echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "`/usr/bin/arch -k`" in
+ Series*|S4*)
+ UNAME_RELEASE=`uname -v`
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
+ test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3
+ case "`/bin/arch`" in
+ sun3)
+ echo m68k-sun-sunos${UNAME_RELEASE}
+ ;;
+ sun4)
+ echo sparc-sun-sunos${UNAME_RELEASE}
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos${UNAME_RELEASE}
+ exit ;;
+ # The situation for MiNT is a little confusing. The machine name
+ # can be virtually everything (everything which is not
+ # "atarist" or "atariste" at least should have a processor
+ # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
+ # to the lowercase version "mint" (or "freemint"). Finally
+ # the system name "TOS" denotes a system which is actually not
+ # MiNT. But MiNT is downward compatible to TOS, so this should
+ # be no problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint${UNAME_RELEASE}
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint${UNAME_RELEASE}
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint${UNAME_RELEASE}
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint${UNAME_RELEASE}
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten${UNAME_RELEASE}
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten${UNAME_RELEASE}
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix${UNAME_RELEASE}
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix${UNAME_RELEASE}
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c &&
+ dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
+ SYSTEM_NAME=`$dummy $dummyarg` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos${UNAME_RELEASE}
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
+ then
+ if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
+ [ ${TARGET_BINARY_INTERFACE}x = x ]
+ then
+ echo m88k-dg-dgux${UNAME_RELEASE}
+ else
+ echo m88k-dg-dguxbcs${UNAME_RELEASE}
+ fi
+ else
+ echo i586-dg-dgux${UNAME_RELEASE}
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if [ -x /usr/bin/oslevel ] ; then
+ IBM_REV=`/usr/bin/oslevel`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
+ if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if [ -x /usr/bin/lslpp ] ; then
+ IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc |
+ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/`
+ else
+ IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
+ fi
+ echo ${IBM_ARCH}-ibm-aix${IBM_REV}
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ case "${UNAME_MACHINE}" in
+ 9000/31? ) HP_ARCH=m68000 ;;
+ 9000/[34]?? ) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if [ -x /usr/bin/getconf ]; then
+ sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
+ sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
+ case "${sc_cpu_version}" in
+ 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "${sc_kernel_bits}" in
+ 32) HP_ARCH=hppa2.0n ;;
+ 64) HP_ARCH=hppa2.0w ;;
+ '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if [ "${HP_ARCH}" = "" ]; then
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if [ ${HP_ARCH} = hppa2.0w ]
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH=hppa2.0w
+ else
+ HP_ARCH=hppa64
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ *:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+ # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
+	# set UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ e2k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ k1om:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+ ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-${LIBC}
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-${LIBC}
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-${LIBC}
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+ # uname -m prints for DJGPP always 'pc', but it prints nothing about
+ # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configure will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-ACE:SUPER-UX:*:*)
+ echo sxace-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
+ fi
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = x86; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = 386; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+ amd64:Isilon\ OneFS:*:*)
+ echo x86_64-unknown-onefs
+ exit ;;
+esac
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script, last modified $timestamp, has failed to recognize
+the operating system you are using. It is advised that you
+download the most up to date version of the config scripts from
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+If the version you run ($0) is already up to date, please
+send the following data and any information you think might be
+pertinent to <config-patches@gnu.org> in order to provide the needed
+information to handle your system.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
diff --git a/build/autoconf/mozconfig-find b/build/autoconf/mozconfig-find
new file mode 100644
index 0000000..97dd90c
--- /dev/null
+++ b/build/autoconf/mozconfig-find
@@ -0,0 +1,76 @@
+#! /bin/sh
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozconfigfind - Loads options from .mozconfig onto configure's
+# command-line. The .mozconfig file is searched for in the
+# order:
+# If $MOZCONFIG is set, use that.
+# If one of $TOPSRCDIR/.mozconfig or $TOPSRCDIR/mozconfig exists, use it.
+# If both exist, or if various legacy locations contain a mozconfig, error.
+# Otherwise, use the default build options.
+#
+topsrcdir=$1
+
+abspath() {
+ if uname -s | grep -q MINGW; then
+ # We have no way to figure out whether we're in gmake or pymake right
+ # now. gmake gives us Unix-style paths while pymake gives us Windows-style
+ # paths, so attempt to handle both.
+ regexes='^\([A-Za-z]:\|\\\\\|\/\) ^\/'
+ else
+ regexes='^\/'
+ fi
+
+ for regex in $regexes; do
+ if echo $1 | grep -q $regex; then
+ echo $1
+ return
+ fi
+ done
+
+ # If we're at this point, we have a relative path
+ echo `pwd`/$1
+}
+
+if [ -n "$MOZCONFIG" ] && ! [ -f "$MOZCONFIG" ]; then
+ echo "Specified MOZCONFIG \"$MOZCONFIG\" does not exist!" 1>&2
+ exit 1
+fi
+
+if [ -n "$MOZ_MYCONFIG" ]; then
+ echo "Your environment currently has the MOZ_MYCONFIG variable set to \"$MOZ_MYCONFIG\". MOZ_MYCONFIG is no longer supported. Please use MOZCONFIG instead." 1>&2
+ exit 1
+fi
+
+if [ -z "$MOZCONFIG" ] && [ -f "$topsrcdir/.mozconfig" ] && [ -f "$topsrcdir/mozconfig" ]; then
+ echo "Both \$topsrcdir/.mozconfig and \$topsrcdir/mozconfig are supported, but you must choose only one. Please remove the other." 1>&2
+ exit 1
+fi
+
+for _config in "$MOZCONFIG" \
+ "$topsrcdir/.mozconfig" \
+ "$topsrcdir/mozconfig"
+do
+ if test -f "$_config"; then
+ abspath $_config
+ exit 0
+ fi
+done
+
+# We used to support a number of other implicit .mozconfig locations. We now
+# detect if we were about to use any of these locations and issue an error if we
+# find any.
+for _config in "$topsrcdir/mozconfig.sh" \
+ "$topsrcdir/myconfig.sh" \
+ "$HOME/.mozconfig" \
+ "$HOME/.mozconfig.sh" \
+ "$HOME/.mozmyconfig.sh"
+do
+ if test -f "$_config"; then
+ echo "You currently have a mozconfig at \"$_config\". This implicit location is no longer supported. Please move it to $topsrcdir/.mozconfig or specify it explicitly via \$MOZCONFIG." 1>&2
+ exit 1
+ fi
+done
diff --git a/build/autoconf/mozconfig2client-mk b/build/autoconf/mozconfig2client-mk
new file mode 100644
index 0000000..aaf8de1
--- /dev/null
+++ b/build/autoconf/mozconfig2client-mk
@@ -0,0 +1,76 @@
+#! /bin/sh
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# mozconfig2client-mk - Translates .mozconfig into options for client.mk.
+# Prints defines to stdout.
+#
+# See mozconfig2configure for more details
+
+print_header() {
+ cat <<EOF
+# gmake
+# This file is automatically generated for client.mk.
+# Do not edit. Edit $FOUND_MOZCONFIG instead.
+
+EOF
+}
+
+ac_add_options() {
+ for _opt
+ do
+ case "$_opt" in
+ --target=*)
+ echo $_opt | sed s/--target/CONFIG_GUESS/
+ ;;
+ *)
+ echo "# $_opt is used by configure (not client.mk)"
+ ;;
+ esac
+ done
+}
+
+ac_add_app_options() {
+ echo "# $* is used by configure (not client.mk)"
+}
+
+mk_add_options() {
+ for _opt
+ do
+ # Escape shell characters, space, tab, dollar, quote, backslash,
+ # and substitute '@<word>@' with '$(<word>)'.
+ _opt=`echo "$_opt" | sed -e 's/\([\"\\]\)/\\\\\1/g; s/@\([^@]*\)@/\$(\1)/g;'`
+ echo $_opt;
+ done
+}
+
+# Main
+#--------------------------------------------------
+
+scriptdir=`dirname $0`
+topsrcdir=$1
+
+# If the path changes, configure should be rerun
+echo "# PATH=$PATH"
+
+# If FOUND_MOZCONFIG isn't set, look for it and make sure the script doesn't error out
+isfoundset=${FOUND_MOZCONFIG+yes}
+if [ -z $isfoundset ]; then
+ FOUND_MOZCONFIG=`$scriptdir/mozconfig-find $topsrcdir`
+ if [ $? -ne 0 ]; then
+ echo '$(error Fix above errors before continuing.)'
+ else
+ isfoundset=yes
+ fi
+fi
+
+if [ -n $isfoundset ]; then
+ if [ "$FOUND_MOZCONFIG" ]
+ then
+ print_header
+ . "$FOUND_MOZCONFIG"
+ echo "FOUND_MOZCONFIG := $FOUND_MOZCONFIG"
+ fi
+fi
diff --git a/build/dumbmake-dependencies b/build/dumbmake-dependencies
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/build/dumbmake-dependencies
diff --git a/build/mach_bootstrap.py b/build/mach_bootstrap.py
new file mode 100644
index 0000000..896d028
--- /dev/null
+++ b/build/mach_bootstrap.py
@@ -0,0 +1,14 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import os, sys
+
+def bootstrap(topsrcdir, mozilla_dir=None):
+ if mozilla_dir is None:
+ mozilla_dir = os.path.join(topsrcdir, 'platform')
+ sys.path[0:0] = [mozilla_dir]
+ import build.mach_bootstrap
+ return build.mach_bootstrap.bootstrap(topsrcdir, mozilla_dir)
diff --git a/build/pymake/make.py b/build/pymake/make.py
new file mode 100644
index 0000000..99d4839
--- /dev/null
+++ b/build/pymake/make.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a wrapper around mozilla-central's pymake. If that isn't found then
+# this uses client.py to pull it in.
+
+import os
+import sys
+import subprocess
+import shlex
+
+def getpath(relpath):
+ thisdir = os.path.dirname(__file__)
+ return os.path.abspath(os.path.join(thisdir, *relpath))
+
+PYMAKE = getpath(["..", "..", "platform", "build", "pymake", "make.py"])
+
+def main(args):
+ if 'TINDERBOX_OUTPUT' in os.environ:
+ # When building on mozilla build slaves, execute mozmake instead. Until bug
+    # 978211 is fixed, this is the easiest, albeit hackish, way to do this.
+ mozmake = os.path.join(os.path.dirname(__file__), '..', '..',
+ 'mozmake.exe')
+ if os.path.exists(mozmake):
+ cmd = [mozmake]
+ cmd.extend(sys.argv[1:])
+ shell = os.environ.get('SHELL')
+ if shell and not shell.lower().endswith('.exe'):
+ cmd += ['SHELL=%s.exe' % shell]
+ sys.exit(subprocess.call(cmd))
+
+ if not os.path.exists(PYMAKE):
+ raise Exception("Pymake not found")
+
+ subprocess.check_call([sys.executable, PYMAKE] + args)
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/build/pypng/check-sync-exceptions b/build/pypng/check-sync-exceptions
new file mode 100644
index 0000000..b326f7c
--- /dev/null
+++ b/build/pypng/check-sync-exceptions
@@ -0,0 +1,3 @@
+# Nothing in this directory needs to be in sync with mozilla
+# The contents are used only in c-c
+* \ No newline at end of file
diff --git a/build/pypng/exnumpy.py b/build/pypng/exnumpy.py
new file mode 100644
index 0000000..82daf0a
--- /dev/null
+++ b/build/pypng/exnumpy.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/exnumpy.py $
+# $Rev: 126 $
+
+# Numpy example.
+# Original code created by Mel Raab, modified by David Jones.
+
+'''
+ Example code integrating RGB PNG files, PyPNG and NumPy
+ (abstracted from Mel Raab's functioning code)
+'''
+
+# http://www.python.org/doc/2.4.4/lib/module-itertools.html
+import itertools
+
+import numpy
+import png
+
+
+''' If you have a PNG file for an RGB image,
+ and want to create a numpy array of data from it.
+'''
+# Read the file "picture.png" from the current directory. The `Reader`
+# class can take a filename, a file-like object, or the byte data
+# directly; this suggests alternatives such as using urllib to read
+# an image from the internet:
+# png.Reader(file=urllib.urlopen('http://www.libpng.org/pub/png/PngSuite/basn2c16.png'))
+pngReader=png.Reader(filename='picture.png')
+# Tuple unpacking, using multiple assignment, is very useful for the
+# result of asDirect (and other methods).
+# See
+# http://docs.python.org/tutorial/introduction.html#first-steps-towards-programming
+row_count, column_count, pngdata, meta = pngReader.asDirect()
+bitdepth=meta['bitdepth']
+plane_count=meta['planes']
+
+# Make sure we're dealing with RGB files
+assert plane_count == 3
+
+''' Boxed row flat pixel:
+ list([R,G,B, R,G,B, R,G,B],
+ [R,G,B, R,G,B, R,G,B])
+ Array dimensions for this example: (2,9)
+
+ Create `image_2d` as a two-dimensional NumPy array by stacking a
+ sequence of 1-dimensional arrays (rows).
+ The NumPy array mimics PyPNG's (boxed row flat pixel) representation;
+ it will have dimensions ``(row_count,column_count*plane_count)``.
+'''
+# The use of ``numpy.uint16``, below, is to convert each row to a NumPy
+# array with data type ``numpy.uint16``. This is a feature of NumPy,
+# discussed further in
+# http://docs.scipy.org/doc/numpy/user/basics.types.html .
+# You can avoid the explicit conversion with
+# ``numpy.vstack(pngdata)``, but then NumPy will pick the array's data
+# type; in practice it seems to pick ``numpy.int32``, which is large enough
+# to hold any pixel value for any PNG image but uses 4 bytes per value when
+# 1 or 2 would be enough.
+# --- extract 001 start
+image_2d = numpy.vstack(itertools.imap(numpy.uint16, pngdata))
+# --- extract 001 end
+# Do not be tempted to use ``numpy.asarray``; when passed an iterator
+# (`pngdata` is often an iterator) it will attempt to create a size 1
+# array with the iterator as its only element.
+# An alternative to the above is to create the target array of the right
+# shape, then populate it row by row:
+if 0:
+ image_2d = numpy.zeros((row_count,plane_count*column_count),
+ dtype=numpy.uint16)
+ for row_index, one_boxed_row_flat_pixels in enumerate(pngdata):
+ image_2d[row_index,:]=one_boxed_row_flat_pixels
+
+del pngReader
+del pngdata
+
+
+''' Reconfigure for easier referencing, similar to
+ Boxed row boxed pixel:
+ list([ (R,G,B), (R,G,B), (R,G,B) ],
+ [ (R,G,B), (R,G,B), (R,G,B) ])
+ Array dimensions for this example: (2,3,3)
+
+ ``image_3d`` will contain the image as a three-dimensional numpy
+ array, having dimensions ``(row_count,column_count,plane_count)``.
+'''
+# --- extract 002 start
+image_3d = numpy.reshape(image_2d,
+ (row_count,column_count,plane_count))
+# --- extract 002 end
+
+
+''' ============= '''
+
+''' Convert NumPy image_3d array to PNG image file.
+
+ If the data is three-dimensional, as it is above, the best thing
+ to do is reshape it into a two-dimensional array with a shape of
+ ``(row_count, column_count*plane_count)``. Because a
+    two-dimensional numpy array is iterable (yielding its rows), it can be passed
+ directly to the ``png.Writer.write`` method.
+'''
+
+row_count, column_count, plane_count = image_3d.shape
+assert plane_count==3
+
+pngfile = open('picture_out.png', 'wb')
+try:
+ # This example assumes that you have 16-bit pixel values in the data
+ # array (that's what the ``bitdepth=16`` argument is for).
+ # If you don't, then the resulting PNG file will likely be
+ # very dark. Hey, it's only an example.
+ pngWriter = png.Writer(column_count, row_count,
+ greyscale=False,
+ alpha=False,
+ bitdepth=16)
+ # As of 2009-04-13 passing a numpy array that has an element type
+ # that is a numpy integer type (for example, the `image_3d` array has an
+ # element type of ``numpy.uint16``) generates a deprecation warning.
+ # This is probably a bug in numpy; it may go away in the future.
+ # The code still works despite the warning.
+ # See http://code.google.com/p/pypng/issues/detail?id=44
+# --- extract 003 start
+ pngWriter.write(pngfile,
+ numpy.reshape(image_3d, (-1, column_count*plane_count)))
+# --- extract 003 end
+finally:
+ pngfile.close()
+
diff --git a/build/pypng/iccp.py b/build/pypng/iccp.py
new file mode 100644
index 0000000..190db73
--- /dev/null
+++ b/build/pypng/iccp.py
@@ -0,0 +1,537 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/iccp.py $
+# $Rev: 182 $
+
+# iccp
+#
+# International Color Consortium Profile
+#
+# Tools for manipulating ICC profiles.
+#
+# An ICC profile can be extracted from a PNG image (iCCP chunk).
+#
+#
+# Non-standard ICCP tags.
+#
+# Apple use some (widespread but) non-standard tags. These can be
+# displayed in Apple's ColorSync Utility.
+# - 'vcgt' (Video Card Gamma Tag). Table to load into video
+# card LUT to apply gamma.
+# - 'ndin' Apple display native information.
+# - 'dscm' Apple multi-localized description strings.
+# - 'mmod' Apple display make and model information.
+#
+
+# References
+#
+# [ICC 2001] ICC Specification ICC.1:2001-04 (Profile version 2.4.0)
+# [ICC 2004] ICC Specification ICC.1:2004-10 (Profile version 4.2.0.0)
+
+import struct
+
+import png
+
class FormatError(Exception):
    """Raised when input bytes do not conform to the expected ICC
    profile layout (e.g. a truncated header)."""
+
class Profile:
    """An International Color Consortium Profile (ICC Profile)."""

    def __init__(self):
        # Tag table as a list of (signature, raw-bytes) pairs, in file
        # order; None until a profile is read or written.
        self.rawtagtable = None
        # The same raw tag data keyed by signature.
        self.rawtagdict = {}
        # Decoded header fields.
        self.d = dict()

    def fromFile(self, inp, name='<unknown>'):
        """Read an ICC Profile from the open binary file `inp` and
        parse it (see :meth:`fromString`)."""

        # See [ICC 2004]
        profile = inp.read(128)
        if len(profile) < 128:
            raise FormatError("ICC Profile is too short.")
        size, = struct.unpack('>L', profile[:4])
        # BUG FIX: the original read ``d['size']`` from an undefined
        # name ``d``; the declared profile size was just unpacked into
        # ``size``.
        profile += inp.read(size - len(profile))
        return self.fromString(profile, name)

    def fromString(self, profile, name='<unknown>'):
        """Parse the 128 byte header and the tag table of `profile`
        (a string of bytes).  Returns self."""

        # Local import: the module does not import warnings at top
        # level; it is only needed for recoverable format problems.
        import warnings

        self.d = dict()
        d = self.d
        if len(profile) < 128:
            raise FormatError("ICC Profile is too short.")
        d.update(
            zip(['size', 'preferredCMM', 'version',
                 'profileclass', 'colourspace', 'pcs'],
                struct.unpack('>L4sL4s4s4s', profile[:24])))
        if len(profile) < d['size']:
            warnings.warn(
                'Profile size declared to be %d, but only got %d bytes' %
                (d['size'], len(profile)))
        d['version'] = '%08x' % d['version']
        d['created'] = readICCdatetime(profile[24:36])
        d.update(
            zip(['acsp', 'platform', 'flag', 'manufacturer', 'model'],
                struct.unpack('>4s4s3L', profile[36:56])))
        if d['acsp'] != 'acsp':
            warnings.warn('acsp field not present (not an ICC Profile?).')
        d['deviceattributes'] = profile[56:64]
        d['intent'], = struct.unpack('>L', profile[64:68])
        d['pcsilluminant'] = readICCXYZNumber(profile[68:80])
        d['creator'] = profile[80:84]
        d['id'] = profile[84:100]
        ntags, = struct.unpack('>L', profile[128:132])
        d['ntags'] = ntags
        fmt = '4s2L' * ntags
        # Tag table: ntags (sig, offset, size) triples.
        tt = struct.unpack('>' + fmt, profile[132:132+12*ntags])
        tt = group(tt, 3)

        # Could (should) detect 2 or more tags having the same sig. But
        # we don't. Two or more tags with the same sig is illegal per
        # the ICC spec.

        # Convert (sig,offset,size) triples into (sig,value) pairs.
        # BUG FIX (Python 3): materialise with list() -- the result is
        # consumed twice below, and a map iterator is one-shot.
        rawtag = list(map(lambda x: (x[0], profile[x[1]:x[1]+x[2]]), tt))
        self.rawtagtable = rawtag
        self.rawtagdict = dict(rawtag)
        tag = dict()
        # Interpret the tags whose types we know about
        for sig, v in rawtag:
            if sig in tag:
                warnings.warn("Duplicate tag %r found. Ignoring." % sig)
                continue
            v = ICCdecode(v)
            if v is not None:
                tag[sig] = v
        self.tag = tag
        return self

    def greyInput(self):
        """Adjust ``self.d`` dictionary for greyscale input device.
        ``profileclass`` is 'scnr', ``colourspace`` is 'GRAY', ``pcs``
        is 'XYZ '.
        """

        self.d.update(dict(profileclass='scnr',
                           colourspace='GRAY', pcs='XYZ '))
        return self

    def maybeAddDefaults(self):
        """Install a minimal default tag set (copyright, description,
        white point), but only when no tags are present yet."""
        if self.rawtagdict:
            return
        self._addTags(
            cprt='Copyright unknown.',
            desc='created by $URL: http://pypng.googlecode.com/svn/trunk/code/iccp.py $ $Rev: 182 $',
            wtpt=D50(),
            )

    def addTags(self, **k):
        """Add tags as tag=value keyword arguments, topping up with
        the default tag set first if the profile has none."""
        self.maybeAddDefaults()
        self._addTags(**k)

    def _addTags(self, **k):
        """Helper for :meth:`addTags`."""

        for tag, thing in k.items():
            if not isinstance(thing, (tuple, list)):
                thing = (thing,)
            # The encoding type for each tag is implicit; look it up.
            typetag = defaulttagtype[tag]
            self.rawtagdict[tag] = encode(typetag, *thing)
        return self

    def write(self, out):
        """Write ICC Profile to the file."""

        if not self.rawtagtable:
            # list() so rawtagtable is a real, reusable sequence on
            # Python 3 where dict.items() is a view.
            self.rawtagtable = list(self.rawtagdict.items())
        tags = tagblock(self.rawtagtable)
        self.writeHeader(out, 128 + len(tags))
        out.write(tags)
        out.flush()

        return self

    def writeHeader(self, out, size=999):
        """Add default values to the instance's `d` dictionary, then
        write a header out onto the file stream. The size of the
        profile must be specified using the `size` argument.
        """

        def defaultkey(d, key, value):
            """Add ``[key]==value`` to the dictionary `d`, but only if
            it does not have that key already.
            """

            if key in d:
                return
            d[key] = value

        z = '\x00' * 4
        defaults = dict(preferredCMM=z,
                        version='02000000',
                        profileclass=z,
                        colourspace=z,
                        pcs='XYZ ',
                        created=writeICCdatetime(),
                        acsp='acsp',
                        platform=z,
                        flag=0,
                        manufacturer=z,
                        model=0,
                        deviceattributes=0,
                        intent=0,
                        pcsilluminant=encodefuns()['XYZ'](*D50()),
                        creator=z,
                        )
        for k,v in defaults.items():
            defaultkey(self.d, k, v)

        # BUG FIX (Python 3): list() so the result supports item
        # assignment below (map() returns an iterator on Python 3).
        hl = list(map(self.d.__getitem__,
                      ['preferredCMM', 'version', 'profileclass', 'colourspace',
                       'pcs', 'created', 'acsp', 'platform', 'flag',
                       'manufacturer', 'model', 'deviceattributes', 'intent',
                       'pcsilluminant', 'creator']))
        # The version is stored as a hex string; pack it as an integer.
        hl[1] = int(hl[1], 16)

        out.write(struct.pack('>L4sL4s4s4s12s4s4sL4sLQL12s4s', size, *hl))
        # Zero-fill the reserved remainder of the 128 byte header.
        out.write('\x00' * 44)
        return self
+
def encodefuns():
    """Returns a dictionary mapping ICC type signature sig to encoding
    function. Each function returns a string comprising the content of
    the encoded value. To form the full value, the type sig and the 4
    zero bytes should be prefixed (8 bytes).
    """

    def desc(ascii):
        """Return textDescription type [ICC 2001] 6.5.17. The ASCII part is
        filled in with the string `ascii`, the Unicode and ScriptCode parts
        are empty."""

        # NUL-terminate; the declared count below includes the terminator.
        ascii += '\x00'
        l = len(ascii)

        # Layout: ASCII count, ASCII data, then empty Unicode
        # (code, count), empty ScriptCode (code, length) and 67 bytes
        # of zeroed ScriptCode data.
        return struct.pack('>L%ds2LHB67s' % l,
                           l, ascii, 0, 0, 0, 0, '')

    def text(ascii):
        """Return textType [ICC 2001] 6.5.18."""

        # textType is simply a NUL-terminated ASCII string.
        return ascii + '\x00'

    def curv(f=None, n=256):
        """Return a curveType, [ICC 2001] 6.5.3. If no arguments are
        supplied then a TRC for a linear response is generated (no entries).
        If an argument is supplied and it is a number (for *f* to be a
        number it means that ``float(f)==f``) then a TRC for that
        gamma value is generated.
        Otherwise `f` is assumed to be a function that maps [0.0, 1.0] to
        [0.0, 1.0]; an `n` element table is generated for it.
        """

        if f is None:
            # count == 0: linear (identity) response.
            return struct.pack('>L', 0)
        try:
            if float(f) == f:
                # count == 1: single gamma value in u8Fixed8 form
                # (see the matching decode in RDcurv).
                return struct.pack('>LH', 1, int(round(f*2**8)))
        except (TypeError, ValueError):
            # f is not a number: fall through and build a table.
            pass
        assert n >= 2
        table = []
        M = float(n-1)
        for i in range(n):
            # Sample f at n evenly spaced points of [0.0, 1.0] and
            # scale to the full 16-bit range.
            x = i/M
            table.append(int(round(f(x) * 65535)))
        return struct.pack('>L%dH' % n, n, *table)

    def XYZ(*l):
        # Pack each component as a big-endian s15Fixed16 number.
        return struct.pack('>3l', *map(fs15f16, l))

    # Expose the encoders keyed by their local names (desc, text,
    # curv, XYZ), which double as the ICC type signatures.
    return locals()
+
+# Tag type defaults.
+# Most tags can only have one or a few tag types.
+# When encoding, we associate a default tag type with each tag so that
+# the encoding is implicit.
defaulttagtype=dict(
    # Maps tag signature -> default ICC type signature used by
    # Profile._addTags/encode.  An empty string means no default
    # encoder is assigned for that tag.
    A2B0='mft1',
    A2B1='mft1',
    A2B2='mft1',
    bXYZ='XYZ',
    bTRC='curv',
    B2A0='mft1',
    B2A1='mft1',
    B2A2='mft1',
    calt='dtim',
    targ='text',
    chad='sf32',
    chrm='chrm',
    cprt='desc',
    crdi='crdi',
    dmnd='desc',
    dmdd='desc',
    devs='',
    gamt='mft1',
    kTRC='curv',
    gXYZ='XYZ',
    gTRC='curv',
    lumi='XYZ',
    meas='',
    bkpt='XYZ',
    wtpt='XYZ',
    ncol='',
    ncl2='',
    resp='',
    pre0='mft1',
    pre1='mft1',
    pre2='mft1',
    desc='desc',
    pseq='',
    psd0='data',
    psd1='data',
    psd2='data',
    psd3='data',
    ps2s='data',
    ps2i='data',
    rXYZ='XYZ',
    rTRC='curv',
    scrd='desc',
    scrn='',
    tech='sig',
    bfd='',
    vued='desc',
    view='view',
)
+
def encode(tsig, *l):
    """Encode a Python value as an ICC type.  `tsig` is the type
    signature (the first 4 bytes of the encoded value); see [ICC 2004]
    section 10.  Remaining arguments are passed to the type's encoder.
    """

    fun = encodefuns()
    if tsig not in fun:
        # BUG FIX: the original raised a string, which has been illegal
        # since string exceptions were removed from Python.
        raise ValueError("No encoder for type %r." % tsig)
    v = fun[tsig](*l)
    # Pad tsig out with spaces to exactly 4 bytes.
    # BUG FIX: the original appended a single space, which fails to pad
    # signatures shorter than 3 characters.
    tsig = tsig.ljust(4)[:4]
    return tsig + '\x00'*4 + v
+
def tagblock(tag):
    """`tag` should be a list of (*signature*, *element*) pairs, where
    *signature* (the key) is a length 4 string, and *element* is the
    content of the tag element (a byte string).

    The entire tag block (consisting of first a table and then the
    element data) is constructed and returned as a byte string.
    """

    n = len(tag)
    tablelen = 12*n

    # Build the tag table in two parts. A list of 12-byte tags, and a
    # string of element data. Offset is the offset from the start of
    # the profile to the start of the element data (so the offset for
    # the next element is this offset plus the length of the element
    # string so far): 128 byte header + 4 byte tag count + table.
    offset = 128 + tablelen + 4
    # BUG FIX (Python 3): accumulate bytes, not str -- struct.pack and
    # the raw element data are byte strings (identical on Python 2,
    # where b'' == '').
    table = b''
    element = b''
    for k,v in tag:
        table += struct.pack('>4s2L', k, offset + len(element), len(v))
        element += v
    return struct.pack('>L', n) + table + element
+
def iccp(out, inp):
    """Decode the ICC profile embedded in the PNG file `inp` and print
    a summary (header dictionary, tag signatures, decoded tags) to the
    open file `out`."""

    profile = Profile().fromString(*profileFromPNG(inp))
    # BUG FIX: "print >>out" is Python 2 only syntax (a SyntaxError on
    # Python 3); out.write() behaves identically on both.  The
    # signature list is built with a comprehension so it also prints as
    # a list on Python 3, where map() returns an iterator.
    out.write('%s\n' % (profile.d,))
    out.write('%s\n' % ([sig for sig, _ in profile.rawtagtable],))
    out.write('%s\n' % (profile.tag,))
+
def profileFromPNG(inp):
    """Extract profile from PNG file.  Return (*profile*, *name*)
    pair."""

    # Local import; only this function needs zlib.
    import zlib

    r = png.Reader(file=inp)
    _,chunk = r.chunk('iCCP')
    # The chunk is: NUL-terminated profile name, 1 byte compression
    # method, compressed profile data.
    i = chunk.index(b'\x00')
    name = chunk[:i]
    compression = chunk[i+1:i+2]
    # 0 (zlib/deflate) is the only compression method defined by the
    # PNG specification.
    assert compression == b'\x00'
    # BUG FIX: str.decode('zlib') does not exist on Python 3; use the
    # zlib module directly (identical behaviour on Python 2).
    profile = zlib.decompress(chunk[i+2:])
    return profile, name
+
def iccpout(out, inp):
    """Extract the raw ICC Profile from the PNG file `inp` and write
    its bytes to the (already open) file `out`."""

    profile, _ = profileFromPNG(inp)
    out.write(profile)
+
def fs15f16(x):
    """Convert float to ICC s15Fixed16Number (as a Python ``int``)."""

    # s15Fixed16 carries 16 fractional bits: scale by 65536 and round
    # to the nearest integer.
    return int(round(x * 65536))
+
def D50():
    """Return D50 illuminant as an (X,Y,Z) triple."""

    # Standard D50 white point; see [ICC 2001] A.1.
    X, Y, Z = 0.9642, 1.0000, 0.8249
    return (X, Y, Z)
+
+
def writeICCdatetime(t=None):
    """`t` should be a gmtime tuple (as returned from
    ``time.gmtime()``). If not supplied, the current time will be used.
    Return an ICC dateTimeNumber in a 12 byte string.
    """

    import time
    if t is None:
        t = time.gmtime()
    # dateTimeNumber is six big-endian uint16 fields:
    # year, month, day, hour, minute, second.
    year, month, day, hour, minute, second = t[:6]
    return struct.pack('>6H', year, month, day, hour, minute, second)
+
def readICCdatetime(s):
    """Convert from 12 byte ICC representation of dateTimeNumber to
    ISO8601 string. See [ICC 2004] 5.1.1"""

    # Six big-endian uint16 fields: Y, M, D, h, m, s.
    fields = struct.unpack('>6H', s)
    return '%04d-%02d-%02dT%02d:%02d:%02dZ' % fields
+
def readICCXYZNumber(s):
    """Convert from 12 byte ICC representation of XYZNumber to (x,y,z)
    triple of floats. See [ICC 2004] 5.1.11"""

    # An XYZNumber is just three s15Fixed16 values back to back.
    return s15f16l(s)
+
def s15f16l(s):
    """Convert sequence of ICC s15Fixed16 to list of float."""
    # Note: As long as float has at least 32 bits of mantissa, all
    # values are preserved.
    n = len(s)//4
    t = struct.unpack('>%dl' % n, s)
    # BUG FIX (Python 3): return a real list rather than a one-shot
    # map iterator; callers index and reuse the result.  Identical on
    # Python 2, where map already returned a list.
    return [v * 2**-16 for v in t]
+
+# Several types and their byte encodings are defined by [ICC 2004]
+# section 10. When encoded, a value begins with a 4 byte type
+# signature. We use the same 4 byte type signature in the names of the
+# Python functions that decode the type into a Pythonic representation.
+
def ICCdecode(s):
    """Take an ICC encoded tag, and dispatch on its type signature
    (first 4 bytes) to decode it into a Python value. Pair (*sig*,
    *value*) is returned, where *sig* is a 4 byte string, and *value* is
    some Python value determined by the content and type.
    """

    sig = s[0:4].strip()
    # Dispatch table of the type signatures we know how to decode.
    decoders = {
        'text': RDtext,
        'XYZ': RDXYZ,
        'curv': RDcurv,
        'vcgt': RDvcgt,
        'sf32': RDsf32,
    }
    decoder = decoders.get(sig)
    if decoder is None:
        # Unknown type: caller treats None as "leave undecoded".
        return None
    return (sig, decoder(s))
+
def RDXYZ(s):
    """Convert ICC XYZType to rank 1 array of trimulus values."""

    # See [ICC 2001] 6.5.26
    assert s[0:4] == 'XYZ '
    # Skip the 8 byte type header; the payload is packed XYZNumbers.
    return readICCXYZNumber(s[8:])
+
def RDsf32(s):
    """Convert ICC s15Fixed16ArrayType to list of float."""
    # See [ICC 2004] 10.18
    assert s[0:4] == 'sf32'
    # Skip the 8 byte type header; the payload is packed s15Fixed16s.
    return s15f16l(s[8:])
+
def RDmluc(s):
    """Convert ICC multiLocalizedUnicodeType.  This type encodes
    several strings together with a language/country code for each
    string.  A list of (*lc*, *string*) pairs is returned where *lc* is
    the 4 byte language/country code, and *string* is the string
    corresponding to that code.  It seems unlikely that the same
    language/country code will appear more than once with different
    strings, but the ICC standard does not prohibit it."""
    # See [ICC 2004] 10.13
    assert s[0:4] == b'mluc'
    n,sz = struct.unpack('>2L', s[8:16])
    assert sz == 12
    record = []
    for i in range(n):
        # BUG FIX: the original indexed the record table with ``12*n``
        # (always one past the last record) instead of ``12*i``, used
        # native instead of big-endian byte order, and called
        # list.append with two arguments.
        lc,l,o = struct.unpack('>4s2L', s[16+12*i:28+12*i])
        # Offset `o` is relative to the start of the tag; length `l`
        # is in bytes.
        record.append((lc, s[o:o+l]))
    # How are strings encoded?
    return record
+
def RDtext(s):
    """Convert ICC textType to Python string."""
    # Note: type not specified or used in [ICC 2004], only in older
    # [ICC 2001].
    # See [ICC 2001] 6.5.18
    assert s[0:4] == 'text'
    # Drop the 8 byte type header and the trailing NUL terminator.
    return s[8:-1]
+
def RDcurv(s):
    """Convert ICC curveType."""
    # See [ICC 2001] 6.5.3
    assert s[0:4] == 'curv'
    count, = struct.unpack('>L', s[8:12])
    # count == 0 encodes a linear (identity) response.
    if count == 0:
        return dict(gamma=1)
    table = struct.unpack('>%dH' % count, s[12:])
    # count == 1 encodes a single gamma value in u8Fixed8 form.
    if count == 1:
        return dict(gamma=table[0] / 256.0)
    # Otherwise it is a sampled curve: return the raw table.
    return table
+
def RDvcgt(s):
    """Convert Apple CMVideoCardGammaType."""
    # See
    # http://developer.apple.com/documentation/GraphicsImaging/Reference/ColorSync_Manager/Reference/reference.html#//apple_ref/c/tdef/CMVideoCardGammaType
    assert s[0:4] == 'vcgt'
    tagtype, = struct.unpack('>L', s[8:12])
    # BUG FIX: the original tested ``tagtype != 0`` and then
    # ``tagtype == 0`` and ended with an unreachable ``return``; only
    # the table form (tagtype 0) is decoded, everything else is
    # returned as the raw payload.
    if tagtype != 0:
        return s[8:]
    # Table.
    channels,count,size = struct.unpack('>3H', s[12:18])
    if size == 1:
        fmt = 'B'
    elif size == 2:
        fmt = 'H'
    else:
        # Unknown entry size: return the undecoded payload.
        return s[8:]
    l = len(s[18:])//size
    t = struct.unpack('>%d%s' % (l, fmt), s[18:])
    t = group(t, count)
    return size, t
+
+
def group(s, n):
    """Repartition the flat sequence `s` into a list of `n`-tuples
    (any trailing partial group is discarded)."""
    # See
    # http://www.python.org/doc/2.6/library/functions.html#zip
    # BUG FIX (Python 3): wrap in list() -- callers index and reuse the
    # result, and zip() returns a one-shot iterator on Python 3.
    # Identical on Python 2, where zip already returned a list.
    return list(zip(*[iter(s)]*n))
+
+
def main(argv=None):
    """Command line entry: dump a summary of the ICC profile in a PNG
    file (named argument or stdin), or extract it with -o FILE."""
    import sys
    from getopt import getopt
    if argv is None:
        argv = sys.argv
    opts, args = getopt(argv[1:], 'o:')
    if args:
        inp = open(args[0], 'rb')
    else:
        inp = sys.stdin
    for flag, value in opts:
        if flag == '-o':
            return iccpout(open(value, 'wb'), inp)
    return iccp(sys.stdout, inp)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/mkiccp.py b/build/pypng/mkiccp.py
new file mode 100644
index 0000000..08e8df6
--- /dev/null
+++ b/build/pypng/mkiccp.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/mkiccp.py $
+# $Rev: 182 $
+# Make ICC Profile
+
+# References
+#
+# [ICC 2001] ICC Specification ICC.1:2001-04 (Profile version 2.4.0)
+# [ICC 2004] ICC Specification ICC.1:2004-10 (Profile version 4.2.0.0)
+
+import struct
+
+# Local module.
+import iccp
+
def black(m):
    """Return a function that maps all values from [0.0,m] to 0, and maps
    the range [m,1.0] into [0.0, 1.0] linearly.
    """

    m = float(m)

    def stretch(x):
        # Everything at or below the knee is crushed to black; the
        # remainder of the range is rescaled to cover [0.0, 1.0].
        return 0.0 if x <= m else (x - m) / (1.0 - m)
    return stretch
+
+# For monochrome input the required tags are (See [ICC 2001] 6.3.1.1):
+# profileDescription [ICC 2001] 6.4.32
+# grayTRC [ICC 2001] 6.4.19
+# mediaWhitePoint [ICC 2001] 6.4.25
+# copyright [ICC 2001] 6.4.13
+
def agreyprofile(out):
    """Build a greyscale scanner profile whose grey TRC crushes the
    bottom 7% to black, and write it to the open file `out`."""
    profile = iccp.Profile().greyInput()
    profile.addTags(kTRC=black(0.07))
    profile.write(out)
+
def main():
    """Write the example grey profile to standard output."""
    import sys
    agreyprofile(sys.stdout)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pdsimgtopng b/build/pypng/pdsimgtopng
new file mode 100644
index 0000000..975db93
--- /dev/null
+++ b/build/pypng/pdsimgtopng
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pdsimgtopng $
+# $Rev: 154 $
+# PDS Image to PNG
+
+import re
+import struct
+
+import png
+
class FormatError(Exception):
    """Raised when the PDS IMG input cannot be parsed (missing key or
    unsupported record/sample layout)."""
+
def pdskey(s, k):
    """Lookup key `k` in string `s`. Returns value (as a string), or
    raises exception if not found.
    """

    # Guard against keys that would change the meaning of the regex.
    assert re.match(r' *\^?[:\w]+$', k)
    pattern = '^' + re.escape(k) + r' *= *(\w+)'
    m = re.search(pattern, s, re.MULTILINE)
    if m is None:
        raise FormatError("Can't find %s." % k)
    return m.group(1)
+
def img(inp):
    """Open the PDS IMG file `inp` and return (*pixels*, *info*).
    *pixels* is an iterator over the rows, *info* is the information
    dictionary.
    """

    # Read the label in fixed-size chunks, tracking how much of the
    # stream has been consumed so the image data can be located.
    consumed = 1024

    s = inp.read(consumed)
    record_type = pdskey(s, 'RECORD_TYPE')
    if record_type != 'FIXED_LENGTH':
        raise FormatError(
            "Can only deal with FIXED_LENGTH record type (found %s)" %
            record_type)
    record_bytes = int(pdskey(s,'RECORD_BYTES'))
    # Read (and validate the presence of) FILE_RECORDS; unused below.
    file_records = int(pdskey(s, 'FILE_RECORDS'))
    label_records = int(pdskey(s, 'LABEL_RECORDS'))
    remaining = label_records * record_bytes - consumed
    s += inp.read(remaining)
    consumed += remaining

    image_pointer = int(pdskey(s, '^IMAGE'))
    # "^IMAGE" locates a record. Records are numbered starting from 1.
    image_index = image_pointer - 1
    image_offset = image_index * record_bytes
    gap = image_offset - consumed
    assert gap >= 0
    if gap:
        # Skip any filler between the label and the image data.
        inp.read(gap)
    # This assumes there is only one OBJECT in the file, and it is the
    # IMAGE.
    height = int(pdskey(s, ' LINES'))
    width = int(pdskey(s, ' LINE_SAMPLES'))
    sample_type = pdskey(s, ' SAMPLE_TYPE')
    sample_bits = int(pdskey(s, ' SAMPLE_BITS'))
    # For Messenger MDIS, SAMPLE_BITS is reported as 16, but only values
    # from 0 to 4095 are used.
    bitdepth = 12
    if sample_type == 'MSB_UNSIGNED_INTEGER':
        fmt = '>H'
    else:
        # BUG FIX: the original raised a string, which is illegal;
        # raise the module's FormatError instead.
        raise FormatError('Unknown sample type: %s.' % sample_type)
    sample_bytes = (1,2)[bitdepth > 8]
    row_bytes = sample_bytes * width
    # Widen the format to unpack one whole row at a time.
    fmt = fmt[:1] + str(width) + fmt[1:]
    def rowiter():
        for y in range(height):
            yield struct.unpack(fmt, inp.read(row_bytes))
    info = dict(greyscale=True, alpha=False, bitdepth=bitdepth,
                size=(width,height), gamma=1.0)
    return rowiter(), info
+
+
def main(argv=None):
    """Convert the PDS IMG file named on the command line (or stdin)
    to a PNG on stdout."""
    import sys

    if argv is None:
        argv = sys.argv
    args = argv[1:]
    if args:
        src = open(args[0], 'rb')
    else:
        src = sys.stdin
    pixels, info = img(src)
    writer = png.Writer(**info)
    writer.write(sys.stdout, pixels)

if __name__ == '__main__':
    main()
+
+
diff --git a/build/pypng/pipasgrey b/build/pypng/pipasgrey
new file mode 100644
index 0000000..2b3727f
--- /dev/null
+++ b/build/pypng/pipasgrey
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipasgrey $
+# $Rev: 187 $
+
+# pipasgrey
+
+# Convert image to grey (L, or LA), but only if that involves no colour
+# change.
+
def asgrey(out, inp, quiet=False):
    """Convert image to greyscale, but only when no colour change. This
    works by using the input G channel (green) as the output L channel
    (luminance) and checking that every pixel is grey as we go. A non-grey
    pixel will raise an error, but if `quiet` is true then the grey pixel
    check is suppressed.
    """

    from array import array

    import png

    r = png.Reader(file=inp)
    _,_,pixels,info = r.asDirect()
    if info['greyscale']:
        # Already grey: pass the image straight through.
        w = png.Writer(**info)
        return w.write(out, pixels)
    planes = info['planes']
    # Dropping R and B leaves L (and A, if present).
    targetplanes = planes - 2
    alpha = info['alpha']
    width = info['size'][0]
    typecode = 'BH'[info['bitdepth'] > 8]
    # Values per target row
    vpr = width * (targetplanes)
    def iterasgrey():
        for i,row in enumerate(pixels):
            row = array(typecode, row)
            targetrow = array(typecode, [0]*vpr)
            # Copy G (and possibly A) channel.
            # BUG FIX: the green channel is at offset 1; the original
            # read offset 0 (red), which also made the grey check below
            # compare red against itself and therefore never fail.
            green = row[1::planes]
            if alpha:
                targetrow[0::2] = green
                targetrow[1::2] = row[3::4]
            else:
                targetrow = green
            # Check R and B channels match G.
            if not quiet and (
                green != row[0::planes] or green != row[2::planes]):
                raise ValueError('Row %i contains non-grey pixel.' % i)
            yield targetrow
    info['greyscale'] = True
    del info['planes']
    w = png.Writer(**info)
    w.write(out, iterasgrey())
+
def main(argv=None):
    """Command line entry: pipasgrey [-q] [in.png]."""
    from getopt import getopt
    import sys
    if argv is None:
        argv = sys.argv
    opts, args = getopt(argv[1:], 'q')
    # -q suppresses the non-grey pixel check.
    quiet = any(flag == '-q' for flag, _ in opts)
    if args:
        src = open(args[0], 'rb')
    else:
        src = sys.stdin
    return asgrey(sys.stdout, src, quiet)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipcat b/build/pypng/pipcat
new file mode 100644
index 0000000..e0d0805
--- /dev/null
+++ b/build/pypng/pipcat
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipcat $
+# $Rev: 77 $
+
+# http://www.python.org/doc/2.4.4/lib/module-itertools.html
+import itertools
+import sys
+
+import png
+
def cat(out, l):
    """Concatenate the list of images. All input images must be same
    height and have the same number of channels. They are concatenated
    left-to-right. `out` is the (open file) destination for the
    output image. `l` should be a list of open files (the input
    image files).
    """

    readers = [png.Reader(file=f) for f in l]
    # Read the headers so width/height are available.
    for r in readers:
        r.preamble()
    # The reference height; from the first image.
    height = readers[0].height
    # The total target width
    width = 0
    for i,r in enumerate(readers):
        if r.height != height:
            # BUG FIX: the original raised ``Error``, an undefined
            # name; ValueError is the appropriate built-in.
            raise ValueError('Image %d, height %d, does not match %d.' %
                             (i, r.height, height))
        width += r.width
    pixel,info = zip(*[r.asDirect()[2:4] for r in readers])
    tinfo = dict(info[0])
    del tinfo['size']
    w = png.Writer(width, height, **tinfo)
    def itercat():
        # itertools.izip does not exist on Python 3; plain zip is
        # equivalent here (on Python 2 it merely builds rows eagerly).
        for row in zip(*pixel):
            yield itertools.chain(*row)
    w.write(out, itercat())
+
def main(argv):
    """Concatenate the PNG files named on the command line to stdout."""
    return cat(sys.stdout, [open(name, 'rb') for name in argv[1:]])

if __name__ == '__main__':
    main(sys.argv)
diff --git a/build/pypng/pipcolours b/build/pypng/pipcolours
new file mode 100644
index 0000000..7c76df8
--- /dev/null
+++ b/build/pypng/pipcolours
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipcolours $
+# $Rev: 96 $
+
+# pipcolours - extract all colours present in source image.
+
def colours(out, inp):
    """Write, to `out`, a one-row PNG containing every distinct colour
    of the source image `inp`, in sorted order."""
    import itertools
    import png

    r = png.Reader(file=inp)
    _,_,pixels,info = r.asDirect()
    planes = info['planes']
    col = set()
    for row in pixels:
        # Collect each pixel (a `planes`-tuple) into the set.
        for pixel in png.group(row, planes):
            col.add(pixel)
    col,planes = channel_reduce(col, planes)
    # Flatten the sorted colours into a single row of sample values.
    flat = sorted(col)
    flat = list(itertools.chain(*flat))
    width = len(flat)//planes
    greyscale = planes in (1,2)
    alpha = planes in (2,4)
    bitdepth = info['bitdepth']
    w = png.Writer(width, 1,
                   bitdepth=bitdepth, greyscale=greyscale, alpha=alpha)
    w.write(out, [flat])
+
def channel_reduce(col, planes):
    """Attempt to reduce the number of channels in the set of
    colours `col`.  Returns a (colours, planes) pair."""
    if planes >= 3:
        def isgrey(c):
            # The first three channels (R, G, B) must all be equal.
            return c[0] == c[1] == c[2]
        # BUG FIX / idiom: the original used ``min(map(isgrey, col)) ==
        # True``, which raises ValueError on an empty colour set;
        # all() handles that case (vacuously grey) and reads better.
        if all(isgrey(c) for c in col):
            # Every colour is grey: keep only one colour channel
            # (plus alpha, if present).
            col = set(map(lambda x: x[0::3], col))
            planes -= 2
    return col,planes
+
def main(argv=None):
    """Command line entry: pipcolours [in.png]."""
    import sys

    if argv is None:
        argv = sys.argv

    args = argv[1:]
    if args:
        src = open(args[0], 'rb')
    else:
        src = sys.stdin
    return colours(sys.stdout, src)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipcomposite b/build/pypng/pipcomposite
new file mode 100644
index 0000000..21dd283
--- /dev/null
+++ b/build/pypng/pipcomposite
@@ -0,0 +1,121 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipcomposite $
+# $Rev: 208 $
+# pipcomposite
+# Image alpha compositing.
+
+"""
+pipcomposite [--background #rrggbb] file.png
+
+Composite an image onto a background and output the result. The
+background colour is specified with an HTML-style triple (3, 6, or 12
+hex digits), and defaults to black (#000).
+
+The output PNG has no alpha channel.
+
+It is valid for the input to have no alpha channel, but it doesn't
+make much sense: the output will equal the input.
+"""
+
+import sys
+
def composite(out, inp, background):
    """Alpha-composite the PNG image `inp` over the solid colour
    `background` (an "#rgb"-style string, see :func:`rgbhex`) and
    write the resulting (alpha-free) PNG to `out`."""
    import png

    p = png.Reader(file=inp)
    w,h,pixel,info = p.asRGBA()

    outinfo = dict(info)
    outinfo['alpha'] = False
    outinfo['planes'] -= 1
    outinfo['interlace'] = 0

    # Convert to a list and normalise to same range as source.
    # BUG FIX (Python 3): use list comprehensions throughout -- the
    # original built map objects, which cannot be repeated (``*= w``)
    # or reused per row on Python 3.  Identical on Python 2.
    background = rgbhex(background)
    maxval = float(2**info['bitdepth'] - 1)
    background = [int(0.5 + x*maxval/65535.0) for x in background]
    # Repeat background so that it's a whole row of sample values.
    background *= w

    def iterrow():
        for row in pixel:
            # Remove alpha from row, then create a list with one alpha
            # entry _per channel value_.
            # Squirrel the alpha channel away (and normalise it).
            t = [x/maxval for x in row[3::4]]
            row = list(row)
            del row[3::4]
            alpha = row[:]
            for i in range(3):
                alpha[i::3] = t
            assert len(alpha) == len(row) == len(background)
            # Standard "over" compositing, rounded to the nearest int.
            yield [int(0.5 + a*v + (1.0-a)*b)
                   for a, v, b in zip(alpha, row, background)]

    w = png.Writer(**outinfo)
    w.write(out, iterrow())
+
def rgbhex(s):
    """Take an HTML style string of the form "#rrggbb" and return a
    colour (R,G,B) triple. Following the initial '#' there can be 3, 6,
    or 12 digits (for 4-, 8- or 16- bits per channel). In all cases the
    values are expanded to a full 16-bit range, so the returned values
    are all in range(65536).
    """

    assert s[0] == '#'
    s = s[1:]
    assert len(s) in (3,6,12)

    # Create a target list of length 12, and expand the string s to make
    # it length 12.
    l = ['z']*12
    if len(s) == 3:
        # One hex digit per channel: replicate each digit four times.
        for i in range(4):
            l[i::4] = s
    if len(s) == 6:
        # Two hex digits per channel: replicate each pair twice.
        for i in range(2):
            l[i::4] = s[i::2]
            l[i+2::4] = s[i::2]
    if len(s) == 12:
        l[:] = s
    s = ''.join(l)
    # BUG FIX (Python 3): return a real list rather than a one-shot map
    # iterator; callers index and repeat the result.  Identical on
    # Python 2, where map already returned a list.
    return [int(x, 16) for x in (s[:4], s[4:8], s[8:])]
+
class Usage(Exception):
    """Raised (and caught in main) when the command line arguments are
    invalid; carries the getopt error message."""
+
def main(argv=None):
    """Command line driver; see the module docstring for usage.
    Returns 2 on a usage error."""
    import getopt
    import sys

    if argv is None:
        argv = sys.argv

    argv = argv[1:]

    try:
        try:
            opt,arg = getopt.getopt(argv, '',
                                    ['background='])
        # BUG FIX: ``except E, v`` is Python 2 only syntax (a
        # SyntaxError on Python 3); ``as`` works on 2.6+ and 3.
        except getopt.error as msg:
            raise Usage(msg)
        background = '#000'
        for o,v in opt:
            if o in ['--background']:
                background = v
    except Usage as err:
        # BUG FIX: "print >>" is Python 2 only; write portably.
        sys.stderr.write('%s\n' % (__doc__,))
        sys.stderr.write('%s\n' % (str(err),))
        return 2

    if len(arg) > 0:
        f = open(arg[0], 'rb')
    else:
        f = sys.stdin
    return composite(sys.stdout, f, background)


if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipdither b/build/pypng/pipdither
new file mode 100644
index 0000000..c14c76c
--- /dev/null
+++ b/build/pypng/pipdither
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipdither $
+# $Rev: 150 $
+
+# pipdither
+# Error Diffusing image dithering.
+# Now with serpentine scanning.
+
+# See http://www.efg2.com/Lab/Library/ImageProcessing/DHALF.TXT
+
+# http://www.python.org/doc/2.4.4/lib/module-bisect.html
+from bisect import bisect_left
+
+import png
+
def dither(out, inp,
           bitdepth=1, linear=False, defaultgamma=1.0, targetgamma=None,
           cutoff=0.75):
    """Dither the input PNG `inp` into an image with a smaller bit depth
    and write the result image onto `out`. `bitdepth` specifies the bit
    depth of the new image.

    Normally the source image gamma is honoured (the image is
    converted into a linear light space before being dithered), but
    if the `linear` argument is true then the image is treated as
    being linear already: no gamma conversion is done (this is
    quicker, and if you don't care much about accuracy, it won't
    matter much).

    Images with no gamma indication (no ``gAMA`` chunk) are normally
    treated as linear (gamma = 1.0), but often it can be better
    to assume a different gamma value: For example continuous tone
    photographs intended for presentation on the web often carry
    an implicit assumption of being encoded with a gamma of about
    0.45 (because that's what you get if you just "blat the pixels"
    onto a PC framebuffer), so ``defaultgamma=0.45`` might be a
    good idea. `defaultgamma` does not override a gamma value
    specified in the file itself: It is only used when the file
    does not specify a gamma.

    If you (pointlessly) specify both `linear` and `defaultgamma`,
    `linear` wins.

    The gamma of the output image is, by default, the same as the input
    image. The `targetgamma` argument can be used to specify a
    different gamma for the output image. This effectively recodes the
    image to a different gamma, dithering as we go. The gamma specified
    is the exponent used to encode the output file (and appears in the
    output PNG's ``gAMA`` chunk); it is usually less than 1.

    """

    # Encoding is what happened when the PNG was made (and also what
    # happens when we output the PNG). Decoding is what we do to the
    # source PNG in order to process it.

    # The dithering algorithm is not completely general; it
    # can only do bit depth reduction, not arbitrary palette changes.
    import operator
    maxval = 2**bitdepth - 1
    r = png.Reader(file=inp)
    # If image gamma is 1 or gamma is not present and we are assuming a
    # value of 1, then it is faster to pass a maxval parameter to
    # asFloat (the multiplications get combined). With gamma, we have
    # to have the pixel values from 0.0 to 1.0 (as long as we are doing
    # gamma correction here).
    # Slightly annoyingly, we don't know the image gamma until we've
    # called asFloat().
    _,_,pixels,info = r.asDirect()
    planes = info['planes']
    assert planes == 1
    width = info['size'][0]
    sourcemaxval = 2**info['bitdepth'] - 1
    if linear:
        gamma = 1
    else:
        gamma = info.get('gamma') or defaultgamma
    # Convert gamma from encoding gamma to the required power for
    # decoding.
    decode = 1.0/gamma
    # Build a lookup table for decoding; convert from pixel values to
    # linear space.
    # BUG FIX (Python 3): all the tables below are wrapped in list() --
    # they are indexed, sliced and reused, and map()/zip() return
    # one-shot iterators on Python 3 (identical on Python 2).
    sourcef = 1.0/sourcemaxval
    incode = list(map(sourcef.__mul__, range(sourcemaxval+1)))
    if decode != 1.0:
        incode = list(map(decode.__rpow__, incode))
    # Could be different, later on. targetdecode is the assumed gamma
    # that is going to be used to decoding the target PNG. It is the
    # reciprocal of the exponent that we use to encode the target PNG.
    # This is the value that we need to build our table that we use for
    # converting from linear to target colour space.
    if targetgamma is None:
        targetdecode = decode
    else:
        targetdecode = 1.0/targetgamma
    # The table we use for encoding (creating the target PNG), still
    # maps from pixel value to linear space, but we use it inverted, by
    # searching through it with bisect.
    targetf = 1.0/maxval
    outcode = list(map(targetf.__mul__, range(maxval+1)))
    if targetdecode != 1.0:
        outcode = list(map(targetdecode.__rpow__, outcode))
    # The table used for choosing output codes. These values represent
    # the cutoff points between two adjacent output codes.
    choosecode = list(zip(outcode[1:], outcode))
    p = cutoff
    choosecode = [hi*p + lo*(1.0-p) for hi, lo in choosecode]
    def iterdither():
        # Errors diffused downwards (into next row)
        ed = [0.0]*width
        flipped = False
        for row in pixels:
            row = list(map(incode.__getitem__, row))
            row = list(map(operator.add, ed, row))
            if flipped:
                row = row[::-1]
            targetrow = [0] * width
            for i,v in enumerate(row):
                # Clamp. Necessary because previously added errors may take
                # v out of range.
                v = max(0.0, min(v, 1.0))
                # `it` will be the index of the chosen target colour;
                it = bisect_left(choosecode, v)
                t = outcode[it]
                targetrow[i] = it
                # err is the error that needs distributing.
                err = v - t
                # Sierra "Filter Lite" distributes * 2
                # as per this diagram. 1 1
                ef = err/2.0
                # :todo: consider making rows one wider at each end and
                # removing "if"s
                if i+1 < width:
                    row[i+1] += ef
                ef /= 2.0
                ed[i] = ef
                if i:
                    ed[i-1] += ef
            if flipped:
                ed = ed[::-1]
                targetrow = targetrow[::-1]
            yield targetrow
            flipped = not flipped
    info['bitdepth'] = bitdepth
    info['gamma'] = 1.0/targetdecode
    w = png.Writer(**info)
    w.write(out, iterdither())
+
+
def main(argv=None):
    """Command line entry: pipdither [-b bits] [-c cutoff]
    [-g assumed-gamma] [-l] [-o out-gamma] [in.png]."""
    # http://www.python.org/doc/2.4.4/lib/module-getopt.html
    from getopt import getopt
    import sys
    if argv is None:
        argv = sys.argv
    opt,argv = getopt(argv[1:], 'b:c:g:lo:')
    k = {}
    for o,v in opt:
        if o == '-b':
            k['bitdepth'] = int(v)
        if o == '-c':
            k['cutoff'] = float(v)
        if o == '-g':
            k['defaultgamma'] = float(v)
        if o == '-l':
            k['linear'] = True
        if o == '-o':
            k['targetgamma'] = float(v)
        if o == '-?':
            # BUG FIX: "print >>" is Python 2 only syntax; write the
            # usage message portably.  (Note: getopt would actually
            # reject '-?' before reaching here.)
            sys.stderr.write(
                "pipdither [-b bits] [-c cutoff] [-g assumed-gamma] [-l] [in.png]\n")

    if len(argv) > 0:
        f = open(argv[0], 'rb')
    else:
        f = sys.stdin

    return dither(sys.stdout, f, **k)


if __name__ == '__main__':
    main()
diff --git a/build/pypng/piprgb b/build/pypng/piprgb
new file mode 100644
index 0000000..fbe1082
--- /dev/null
+++ b/build/pypng/piprgb
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/piprgb $
+# $Rev: 131 $
+# piprgb
+#
+# Convert input image to RGB or RGBA format. Output will be colour type
+# 2 or 6, and will not have a tRNS chunk.
+
+import png
+
def rgb(out, inp):
    """Convert the PNG on file object `inp` to RGB/RGBA and write it,
    as PNG, to file object `out`."""

    # Inspect the header first: a native alpha channel, or transparency
    # supplied via a tRNS chunk, forces an RGBA target; everything else
    # becomes plain RGB (losing any tRNS chunk in the process).
    reader = png.Reader(file=inp)
    reader.preamble()
    if reader.alpha or reader.trns:
        convert = reader.asRGBA
    else:
        convert = reader.asRGB
    pixels, info = convert()[2:4]
    writer = png.Writer(**info)
    writer.write(out, pixels)
+
def main(argv=None):
    """Command line entry point: convert the named file (or stdin) to
    RGB/RGBA PNG on stdout."""
    import sys

    if argv is None:
        argv = sys.argv
    # A single optional positional argument names the input file.
    source = open(argv[1], 'rb') if len(argv) > 1 else sys.stdin
    return rgb(sys.stdout, source)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipscalez b/build/pypng/pipscalez
new file mode 100644
index 0000000..c60762d
--- /dev/null
+++ b/build/pypng/pipscalez
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipscalez $
+# $Rev: 131 $
+
+# pipscalez
+# Enlarge an image by an integer factor horizontally and vertically.
+
+def rescale(inp, out, xf, yf):
+    """Read a PNG from file object `inp`, enlarge it by the integer
+    factors `xf` (horizontal) and `yf` (vertical) using pixel
+    replication, and write the result as PNG to file object `out`."""
+
+    from array import array
+    import png
+
+    r = png.Reader(file=inp)
+    _,_,pixels,meta = r.asDirect()
+    # 8-bit samples fit in 'B' arrays; deeper images need 'H' (16-bit).
+    typecode = 'BH'[meta['bitdepth'] > 8]
+    planes = meta['planes']
+    # We are going to use meta in the call to Writer, so expand the
+    # size.
+    x,y = meta['size']
+    x *= xf
+    y *= yf
+    meta['size'] = (x,y)
+    del x
+    del y
+    # Values per row, target row.
+    vpr = meta['size'][0] * planes
+    def iterscale():
+        # Replicate each source pixel xf times across the row (per
+        # channel, via extended-slice assignment) and each finished row
+        # yf times down the image.
+        for row in pixels:
+            bigrow = array(typecode, [0]*vpr)
+            row = array(typecode, row)
+            for c in range(planes):
+                channel = row[c::planes]
+                for i in range(xf):
+                    bigrow[i*planes+c::xf*planes] = channel
+            for _ in range(yf):
+                yield bigrow
+    w = png.Writer(**meta)
+    w.write(out, iterscale())
+
+
def main(argv=None):
    """Command line entry point: rescale stdin to stdout.  First
    argument is the horizontal factor; the optional second argument is
    the vertical factor (defaults to the horizontal one)."""
    import sys

    if argv is None:
        argv = sys.argv
    xfactor = int(argv[1])
    yfactor = int(argv[2]) if len(argv) > 2 else xfactor
    return rescale(sys.stdin, sys.stdout, xfactor, yfactor)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipstack b/build/pypng/pipstack
new file mode 100644
index 0000000..5523670
--- /dev/null
+++ b/build/pypng/pipstack
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipstack $
+# $Rev: 190 $
+
+# pipstack
+# Combine input PNG files into a multi-channel output PNG.
+
+"""
+pipstack file1.png [file2.png ...]
+
+pipstack can be used to combine 3 greyscale PNG files into a colour, RGB,
+PNG file. In fact it is slightly more general than that. The number of
+channels in the output PNG is equal to the sum of the numbers of
+channels in the input images. It is an error if this sum exceeds 4 (the
+maximum number of channels in a PNG image is 4, for an RGBA image). The
+output colour model corresponds to the number of channels: 1 -
+greyscale; 2 - greyscale+alpha; 3 - RGB; 4 - RGB+alpha.
+
+In this way it is possible to combine 3 greyscale PNG files into an RGB
+PNG (a common expected use) as well as more esoteric options: rgb.png +
+grey.png = rgba.png; grey.png + grey.png = greyalpha.png.
+
+Color Profile, Gamma, and so on.
+
+[This is not implemented yet]
+
+If an input has an ICC Profile (``iCCP`` chunk) then the output will
+have an ICC Profile, but only if it is possible to combine all the input
+ICC Profiles. It is possible to combine all the input ICC Profiles
+only when: they all use the same Profile Connection Space; the PCS white
+point is the same (specified in the header; should always be D50);
+possibly some other things I haven't thought of yet.
+
+If some of the inputs have a ``gAMA`` chunk (specifying gamma) and
+an output ICC Profile is being generated, then the gamma information
+will be incorporated into the ICC Profile.
+
+When the output is an RGB colour type and the output ICC Profile is
+synthesized, it is necessary to supply colorant tags (``rXYZ`` and so
+on). These are taken from ``sRGB``.
+
+If the input images have ``gAMA`` chunks and no input image has an ICC
+Profile then the output image will have a ``gAMA`` chunk, but only if
+all the ``gAMA`` chunks specify the same value. Otherwise a warning
+will be emitted and no ``gAMA`` chunk will be output.  It is possible to add or replace
+a ``gAMA`` chunk using the ``pipchunk`` tool.
+
+gAMA, pHYs, iCCP, sRGB, tIME, any other chunks.
+"""
+
class Error(Exception):
    """Raised for invalid input detected by the pipstack tool."""
+
def stack(out, inp):
    """Stack the input PNG files into a single multi-channel output PNG.

    `inp` is a sequence of inputs acceptable to `png.Reader`; their
    channels are concatenated, in order, into one image written (as PNG)
    to the file object `out`.  The combined channel count selects the
    colour model (1 grey; 2 grey+alpha; 3 RGB; 4 RGBA).  Raises `Error`
    when there is no input or too many channels; all inputs must share
    one size and bitdepth.
    """

    from array import array
    import itertools
    # Local module
    import png

    if len(inp) < 1:
        raise Error("Required input is missing.")

    l = map(png.Reader, inp)
    # Let data be a list of (pixel,info) pairs.
    data = map(lambda p: p.asDirect()[2:], l)
    totalchannels = sum(map(lambda x: x[1]['planes'], data))

    if not (0 < totalchannels <= 4):
        raise Error("Too many channels in input.")
    alpha = totalchannels in (2,4)
    greyscale = totalchannels in (1,2)
    # Collect every input bitdepth; an input may report a single number
    # or a tuple of per-plane depths.
    bitdepth = []
    for b in map(lambda x: x[1]['bitdepth'], data):
        try:
            if b == int(b):
                bitdepth.append(b)
                continue
        except (TypeError, ValueError):
            pass
        # Assume a tuple.
        bitdepth += b
    # Currently, fail unless all bitdepths equal.  (Bug fix: the
    # original raised NotImplemented, which is not an exception type and
    # itself raises TypeError.)
    if len(set(bitdepth)) > 1:
        raise NotImplementedError("Cannot cope when bitdepths differ - sorry!")
    bitdepth = bitdepth[0]
    arraytype = 'BH'[bitdepth > 8]
    size = map(lambda x: x[1]['size'], data)
    # Currently, fail unless all images same size.
    if len(set(size)) > 1:
        raise NotImplementedError("Cannot cope when sizes differ - sorry!")
    size = size[0]
    # Values per row
    vpr = totalchannels * size[0]
    def iterstack():
        # the izip call creates an iterator that yields the next row
        # from all the input images combined into a tuple.
        for irow in itertools.izip(*map(lambda x: x[0], data)):
            row = array(arraytype, [0]*vpr)
            # output channel
            och = 0
            for i,arow in enumerate(irow):
                # ensure incoming row is an array
                arow = array(arraytype, arow)
                n = data[i][1]['planes']
                for j in range(n):
                    row[och::totalchannels] = arow[j::n]
                    # Bug fix: advance the output channel once per input
                    # *plane*, not once per input image; otherwise a
                    # multi-plane input writes all its planes into one
                    # output slice.
                    och += 1
            yield row
    w = png.Writer(size[0], size[1],
                   greyscale=greyscale, alpha=alpha, bitdepth=bitdepth)
    w.write(out, iterstack())
+
+
def main(argv=None):
    """Command line entry point: every argument names an input PNG;
    the stacked result goes to stdout."""
    import sys

    if argv is None:
        argv = sys.argv
    filenames = list(argv[1:])
    return stack(sys.stdout, filenames)


if __name__ == '__main__':
    main()
diff --git a/build/pypng/pipwindow b/build/pypng/pipwindow
new file mode 100644
index 0000000..2f8c7a2
--- /dev/null
+++ b/build/pypng/pipwindow
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pipwindow $
+# $Rev: 173 $
+
+# pipwindow
+# Tool to crop/expand an image to a rectangular window. Come the
+# revolution this tool will allow the image and the window to be placed
+# arbitrarily (in particular the window can be bigger than the picture
+# and/or overlap it only partially) and the image can be OpenGL style
+# border/repeat effects (repeat, mirrored repeat, clamp, fixed
+# background colour, background colour from source file). For now it
+# only acts as crop. The window must be no greater than the image in
+# both x and y.
+
def window(tl, br, inp, out):
    """Place a window onto the image and cut-out the resulting
    rectangle.  The window is an axis-aligned rectangle with opposite
    corners at *tl* and *br* (each an (x,y) pair).  *inp* is the input
    file (a PNG image); the cropped PNG is written to file object *out*.
    """

    import png

    reader = png.Reader(file=inp)
    width, height, pixels, meta = reader.asDirect()
    # Only genuine cropping is supported: the window must lie strictly
    # inside the image in both dimensions.
    if not (0 <= tl[0] < br[0] <= width):
        raise NotImplementedError()
    if not (0 <= tl[1] < br[1] <= height):
        raise NotImplementedError()
    # Left and right bounds of each row, in flat sample units.
    planes = meta['planes']
    left = tl[0] * planes
    right = br[0] * planes
    def itercrop():
        """An iterator to perform the crop."""

        for rownum, row in enumerate(pixels):
            if rownum < tl[1]:
                continue
            if rownum >= br[1]:
                # Exhausted the window; stop the generator.
                return
            yield row[left:right]
    meta['size'] = (br[0] - tl[0], br[1] - tl[1])
    writer = png.Writer(**meta)
    writer.write(out, itercrop())
+
def main(argv=None):
    """Command line entry point.  With two integers they are the
    bottom-right corner (top-left defaults to the origin); with four
    they are top-left then bottom-right.  A trailing argument names the
    input file, otherwise stdin is used."""
    import sys

    if argv is None:
        argv = sys.argv
    args = argv[1:]

    topleft = (0, 0)
    bottomright = tuple(int(v) for v in args[:2])
    if len(args) >= 4:
        topleft = bottomright
        bottomright = tuple(int(v) for v in args[2:4])
    source = sys.stdin if len(args) in (2, 4) else open(args[-1], 'rb')

    return window(topleft, bottomright, source, sys.stdout)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/plan9topng.py b/build/pypng/plan9topng.py
new file mode 100644
index 0000000..4600b4c
--- /dev/null
+++ b/build/pypng/plan9topng.py
@@ -0,0 +1,293 @@
+#!/usr/bin/env python
+# $Rev: 184 $
+# $URL: http://pypng.googlecode.com/svn/trunk/code/plan9topng.py $
+
+# Imported from //depot/prj/plan9topam/master/code/plan9topam.py#4 on
+# 2009-06-15.
+
+"""Command line tool to convert from Plan 9 image format to PNG format.
+
+Plan 9 image format description:
+http://plan9.bell-labs.com/magic/man2html/6/image
+"""
+
+# http://www.python.org/doc/2.3.5/lib/module-itertools.html
+import itertools
+# http://www.python.org/doc/2.3.5/lib/module-re.html
+import re
+# http://www.python.org/doc/2.3.5/lib/module-sys.html
+import sys
+
def block(s, n):
    """Partition the sequence `s` into `n`-tuples; any final short group
    is discarded.  See
    http://www.python.org/doc/2.6.2/library/functions.html#zip
    """
    it = iter(s)
    return zip(*([it] * n))
+
def convert(f, output=sys.stdout):
    """Convert Plan 9 file to PNG format.  Works with either
    uncompressed or compressed files.
    """

    # The first 11 bytes distinguish the two on-disk variants.
    header = f.read(11)
    if header == 'compressed\n':
        png(output, *decompress(f))
    else:
        png(output, *glue(f, header))
+
+
def glue(f, r):
    """Return (metadata, stream) pair where `r` is the initial portion
    of the 60-byte metadata that has already been read from the stream
    `f`."""

    remaining = 60 - len(r)
    return (r + f.read(remaining), f)
+
def meta(r):
    """Convert the 60 character metadata string `r` from an image file
    into a 5-element list (*chan*, *minx*, *miny*, *limx*, *limy*): the
    pixel-format word followed by four integers.

    As per http://plan9.bell-labs.com/magic/man2html/6/image the
    metadata comprises 5 words separated by blanks.  As it happens each
    word starts at an index that is a multiple of 12, but this routine
    does not care about that."""

    words = r.split()
    # :todo: raise FormatError instead of asserting
    assert len(words) == 5
    return [words[0]] + [int(w) for w in words[1:]]
+
def bitdepthof(pixel):
    """Return the bitdepth for a Plan9 pixel format string."""

    # Each channel spec is a letter followed by a bit count (e.g.
    # "r8g8b8"); 'x' marks padding bits which do not contribute.  The
    # bitdepth is the largest count among the contributing channels.
    depths = [int(spec[1:])
              for spec in re.findall(r'[a-z]\d*', pixel)
              if spec[0] != 'x']
    return max(depths) if depths else 0
+
def maxvalof(pixel):
    """Return the netpbm MAXVAL for a Plan9 pixel format string."""

    # MAXVAL is the largest representable sample value: 2**depth - 1.
    return (1 << bitdepthof(pixel)) - 1
+
+def pixmeta(metadata, f) :
+    """Convert an (uncompressed) Plan 9 image file to a (*pixels*,
+    *meta*) pair.  *pixels* is an iterator that yields each row in
+    boxed row flat pixel format; *meta* is a dictionary of PNG-style
+    metadata suitable for passing to png.Writer.  (Note: *pixels* is
+    the first element of the returned pair, the metadata second.)
+
+    `f`, the input file, should be cued up to the start of the image data.
+    """
+
+    chan,minx,miny,limx,limy = metadata
+    rows = limy - miny
+    width = limx - minx
+    # Count the channels: every channel letter except 'x' (padding).
+    nchans = len(re.findall('[a-wyz]', chan))
+    alpha = 'a' in chan
+    # Iverson's convention for the win!
+    ncolour = nchans - alpha
+    greyscale = ncolour == 1
+    bitdepth = bitdepthof(chan)
+    # Same value as maxvalof(chan); passed to unpack for scaling.
+    maxval = 2**bitdepth - 1
+    # PNG style metadata
+    meta=dict(size=(width,rows), bitdepth=bitdepthof(chan),
+      greyscale=greyscale, alpha=alpha, planes=nchans)
+
+    # Group the flat pixel stream into rows of `width` pixels, then
+    # flatten each row of per-pixel lists into a single row sequence.
+    return itertools.imap(lambda x: itertools.chain(*x),
+      block(unpack(f, rows, width, chan, maxval), width)), meta
+
+def png(out, metadata, f):
+    """Convert to PNG format. `metadata` should be a Plan9 5-tuple; `f`
+    the input file (see :meth:`pixmeta`).  The PNG is written to the
+    file object `out`.
+
+    NOTE(review): this function shadows the `png` module name; the
+    function-local `import png` below keeps it working.
+    """
+
+    import png
+
+    pixels,meta = pixmeta(metadata, f)
+    p = png.Writer(**meta)
+    p.write(out, pixels)
+
+def spam():
+    """Not really spam, but old PAM code, which is in limbo.
+
+    NOTE(review): dead code -- the names it references (nchans, output,
+    width, rows, maxval) are not defined in this scope, so calling it
+    raises NameError.  Kept for reference only.
+    """
+
+    if nchans == 3 or nchans == 1 :
+        # PGM (P5) or PPM (P6) format.
+        output.write('P%d\n%d %d %d\n' % (5+(nchans==3), width, rows, maxval))
+    else :
+        # PAM format.
+        output.write("""P7
+WIDTH %d
+HEIGHT %d
+DEPTH %d
+MAXVAL %d
+""" % (width, rows, nchans, maxval))
+
def unpack(f, rows, width, pixel, maxval):
    """Unpack `f` into pixels.  Assumes the pixel format is such that
    the depth is either a multiple or a divisor of 8.
    `f` is assumed to be an iterator that returns blocks of input such
    that each block contains a whole number of pixels.  An iterator is
    returned that yields each pixel as an n-element list of channel
    values, each scaled so that `maxval` is full intensity.  `pixel`
    describes the pixel format using the Plan9 syntax ("k8", "r8g8b8",
    and so on).
    """

    def mask(w):
        """An integer, to be used as a mask, with bottom `w` bits set to 1."""

        return (1 << w) - 1

    def deblock(f, depth, width):
        """A "packer" used to convert multiple bytes into single pixels.
        `depth` is the pixel depth in bits (>= 8), `width` is the row
        width in pixels.
        """

        w = depth // 8
        for block in f:
            for i in range(len(block) // w):
                p = block[w*i:w*(i+1)]
                # Convert p to little-endian integer, x
                x = 0
                s = 1    # scale
                for j in p:
                    x += s * ord(j)
                    s <<= 8
                yield x

    def bitfunge(f, depth, width):
        """A "packer" used to convert single bytes into multiple pixels.
        Depth is the pixel depth (< 8), width is the row width in pixels.
        """

        for block in f:
            col = 0
            for i in block:
                x = ord(i)
                # Each pixel occupies the top `depth` bits of x.
                for j in range(8 // depth):
                    yield x >> (8 - depth)
                    col += 1
                    if col == width:
                        # A row-end forces a new byte even if we haven't
                        # consumed all of the current byte.  Effectively
                        # rows are bit-padded to make a whole number of
                        # bytes.
                        col = 0
                        break
                    # Bug fix: this shift must happen between pixels,
                    # inside the loop (as in upstream pypng); with it
                    # outside, every pixel unpacked from a byte repeats
                    # that byte's top bits.
                    x <<= depth

    # number of bits in each channel
    chan = [int(d) for d in re.findall(r'\d+', pixel)]
    # type of each channel
    type = re.findall('[a-z]', pixel)

    depth = sum(chan)

    # According to the value of depth pick a "packer" that either
    # gathers multiple bytes into a single pixel (for depth >= 8) or
    # splits bytes into several pixels (for depth < 8)
    if depth >= 8:
        assert depth % 8 == 0
        packer = deblock
    else:
        assert 8 % depth == 0
        packer = bitfunge

    for x in packer(f, depth, width):
        # x is the pixel as an unsigned integer
        o = []
        # This is a bit yucky.  Extract each channel from the _most_
        # significant part of x.
        for j in range(len(chan)):
            v = (x >> (depth - chan[j])) & mask(chan[j])
            x <<= chan[j]
            if type[j] != 'x':
                # scale to maxval
                v = v * float(maxval) / mask(chan[j])
                v = int(v + 0.5)
            o.append(v)
        yield o
+
+
def decompress(f):
    """Decompress a Plan 9 image file.  Assumes f is already cued past
    the initial 'compressed\n' string.
    """

    # The 60 bytes following the magic string are the standard textual
    # metadata; element 4 of the parsed result is limy, the row limit.
    metadata = meta(f.read(60))
    return metadata, decomprest(f, metadata[4])
+
+
def decomprest(f, rows):
    """Iterator that decompresses the rest of a file once the metadata
    have been consumed; yields the decompressed data one block at a
    time until `rows` rows have been produced."""

    consumed = 0
    while consumed < rows:
        consumed, data = deblock(f)
        yield data
+
+
def deblock(f):
    """Decompress a single block from a compressed Plan 9 image file.
    Each block starts with 2 decimal strings of 12 bytes each.  Returns
    a (row, data) pair where row is the total number of rows processed
    according to the file format and data is the decompressed data for a
    set of rows.

    Raises ValueError when the input does not look like a Plan 9 image
    file.  (Bug fix: the original raised bare strings, which is illegal
    in Python 2.6+ and produced an uncatchable TypeError.)"""

    row = int(f.read(12))
    size = int(f.read(12))
    if not (0 <= size <= 6000):
        raise ValueError('block has invalid size; not a Plan 9 image file?')

    # Since each block is at most 6000 bytes we may as well read it all
    # in one go.
    d = f.read(size)
    i = 0
    o = []

    while i < size:
        x = ord(d[i])
        i += 1
        if x & 0x80:
            # Literal run: low 7 bits + 1 bytes copied verbatim.
            x = (x & 0x7f) + 1
            lit = d[i:i+x]
            i += x
            o.extend(lit)
            continue
        # x's high-order bit is 0: a copy op of (x >> 2) + 3 bytes.
        l = (x >> 2) + 3
        # Offset is made from bottom 2 bits of x and all 8 bits of next
        # byte.  http://plan9.bell-labs.com/magic/man2html/6/image
        # doesn't say whether x's 2 bits are most significant or least
        # significant.  But it is clear from inspecting a random file,
        # http://plan9.bell-labs.com/sources/plan9/sys/games/lib/sokoban/images/cargo.bit
        # that x's 2 bits are most significant.
        #
        offset = (x & 3) << 8
        offset |= ord(d[i])
        i += 1
        # Note: complement operator neatly maps (0 to 1023) to (-1 to
        # -1024).  Adding len(o) gives a (non-negative) offset into o
        # from which to start indexing.
        offset = ~offset + len(o)
        if offset < 0:
            raise ValueError('byte offset indexes off the beginning of '
                             'the output buffer; not a Plan 9 image file?')
        # Copy byte-by-byte: the source range may overlap the bytes
        # being appended (self-referencing runs).
        for j in range(l):
            o.append(o[offset+j])
    return row, ''.join(o)
+
def main(argv=None):
    """Command line entry point: convert the named Plan 9 image file
    (or stdin when no argument is given) to PNG on stdout."""
    if argv is None:
        argv = sys.argv
    # Bug fix: the original tested len(sys.argv), silently ignoring an
    # explicitly supplied argv; test the effective argument list.
    if len(argv) <= 1:
        return convert(sys.stdin)
    else:
        return convert(open(argv[1], 'rb'))

if __name__ == '__main__':
    sys.exit(main())
diff --git a/build/pypng/pngchunk b/build/pypng/pngchunk
new file mode 100644
index 0000000..b00e4b1
--- /dev/null
+++ b/build/pypng/pngchunk
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pngchunk $
+# $Rev: 156 $
+# pngchunk
+# Chunk editing/extraction tool.
+
+import struct
+import warnings
+
+# Local module.
+import png
+
+"""
+pngchunk [--gamma g] [--iccprofile file] [--sigbit b] [-c cHNK!] [-c cHNK:foo] [-c cHNK<file]
+
+The ``-c`` option is used to add or remove chunks. A chunk is specified
+by its 4 byte chunk type. If this is followed by a ``!`` then that
+chunk is removed from the PNG file. If the chunk type is followed by a
+``:`` then the chunk is replaced with the contents of the rest of the
+argument (this is probably only useful if the content is mostly ASCII,
+otherwise it's a pain to quote the contents, otherwise see...). A ``<``
+can be used to take the contents of the chunk from the named file.
+"""
+
+
+def chunk(out, inp, l):
+    """Process the input PNG file to the output, chunk by chunk.  Chunks
+    can be inserted, removed, replaced, or sometimes edited.  Generally,
+    chunks are not inspected, so pixel data (in the ``IDAT`` chunks)
+    cannot be modified.  `l` should be a list of (*chunktype*,
+    *content*) pairs.  *chunktype* is usually the type of the PNG chunk,
+    specified as a 4-byte Python string, and *content* is the chunk's
+    content, also as a string; if *content* is ``None`` then *all*
+    chunks of that type will be removed.
+
+    This function *knows* about certain chunk types and will
+    automatically convert from Python friendly representations to
+    string-of-bytes.
+
+    chunktype
+    'gamma'      'gAMA'  float
+    'sigbit'     'sBIT'  int, or tuple of length 1,2 or 3
+    'iccprofile' 'iCCP'  string (zlib-compressed on output)
+
+    Note that the length of the strings used to identify *friendly*
+    chunk types is greater than 4, hence they cannot be confused with
+    canonical chunk types.
+
+    Chunk types, if specified using the 4-byte syntax, need not be
+    official PNG chunks at all.  Non-standard chunks can be created.
+    """
+
+    def canonical(p):
+        """Take a pair (*chunktype*, *content*), and return canonical
+        representation (*chunktype*, *content*) where `chunktype` is the
+        4-byte PNG chunk type and `content` is a string.
+        """
+
+        t,v = p
+        if len(t) == 4:
+            return t,v
+        if t == 'gamma':
+            t = 'gAMA'
+            # PNG stores gamma as a 4-byte big-endian int, scaled by 1e5.
+            v = int(round(1e5*v))
+            v = struct.pack('>I', v)
+        elif t == 'sigbit':
+            t = 'sBIT'
+            try:
+                v[0]
+            except TypeError:
+                # Scalar: promote to a 1-tuple so pack('%dB') works.
+                v = (v,)
+            v = struct.pack('%dB' % len(v), *v)
+        elif t == 'iccprofile':
+            t = 'iCCP'
+            # http://www.w3.org/TR/PNG/#11iCCP
+            # Python 2 only: the 'zip' codec performs zlib compression.
+            v = 'a color profile\x00\x00' + v.encode('zip')
+        else:
+            warnings.warn('Unknown chunk type %r' % t)
+        return t[:4],v
+
+    l = map(canonical, l)
+    # Some chunks automagically replace ones that are present in the
+    # source PNG.  There can only be one of each of these chunk types.
+    # Create a 'replace' dictionary to record these chunks.
+    add = []
+    delete = set()
+    replacing = set(['gAMA', 'sBIT', 'PLTE', 'tRNS', 'sPLT', 'IHDR'])
+    replace = dict()
+    for t,v in l:
+        if v is None:
+            delete.add(t)
+        elif t in replacing:
+            replace[t] = v
+        else:
+            add.append((t,v))
+    del l
+    r = png.Reader(file=inp)
+    chunks = r.chunks()
+    def iterchunks():
+        for t,v in chunks:
+            if t in delete:
+                continue
+            if t in replace:
+                # Python 2 only: deleting from `replace` while iterating
+                # is safe here because we iterate `chunks`, and (below)
+                # items() returns a list snapshot.
+                yield t,replace[t]
+                del replace[t]
+                continue
+            if t == 'IDAT' and replace:
+                # Insert into the output any chunks that are on the
+                # replace list.  We haven't output them yet, because we
+                # didn't see an original chunk of the same type to
+                # replace.  Thus the "replace" is actually an "insert".
+                for u,w in replace.items():
+                    yield u,w
+                    del replace[u]
+            if t == 'IDAT' and add:
+                # Plain insertions go just before the first IDAT too.
+                for item in add:
+                    yield item
+                del add[:]
+            yield t,v
+    return png.write_chunks(out, iterchunks())
+
class Usage(Exception):
    """Raised to report a command line usage error."""
+
+def main(argv=None):
+    """Command line entry point: parse options into a chunk edit list
+    and apply them (via chunk()) to the input PNG -- a named file or
+    stdin -- writing the edited PNG to stdout."""
+
+    import getopt
+    import re
+    import sys
+
+    if argv is None:
+        argv = sys.argv
+
+    argv = argv[1:]
+
+    try:
+        try:
+            opt,arg = getopt.getopt(argv, 'c:',
+                ['gamma=', 'iccprofile=', 'sigbit='])
+        except getopt.error, msg:
+            raise Usage(msg)
+        # Edit list handed to chunk(): (chunktype, content) pairs.
+        k = []
+        for o,v in opt:
+            if o in ['--gamma']:
+                k.append(('gamma', float(v)))
+            if o in ['--sigbit']:
+                k.append(('sigbit', int(v)))
+            if o in ['--iccprofile']:
+                k.append(('iccprofile', open(v, 'rb').read()))
+            if o in ['-c']:
+                type = v[:4]
+                if not re.match('[a-zA-Z]{4}', type):
+                    raise Usage('Chunk type must consist of 4 letters.')
+                # NOTE(review): v[4] raises IndexError (not Usage) when
+                # the -c argument is exactly 4 characters long; the
+                # separator ('!', ':' or '<') is effectively mandatory.
+                if v[4] == '!':
+                    k.append((type, None))
+                if v[4] == ':':
+                    k.append((type, v[5:]))
+                if v[4] == '<':
+                    k.append((type, open(v[5:], 'rb').read()))
+    except Usage, err:
+        print >>sys.stderr, (
+            "usage: pngchunk [--gamma d.dd] [--sigbit b] [-c cHNK! | -c cHNK:text-string]")
+        # NOTE(review): err.message is deprecated Python 2 API.
+        print >>sys.stderr, err.message
+        return 2
+
+    if len(arg) > 0:
+        f = open(arg[0], 'rb')
+    else:
+        f = sys.stdin
+    return chunk(sys.stdout, f, k)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/build/pypng/pnghist b/build/pypng/pnghist
new file mode 100644
index 0000000..4fbbd0a
--- /dev/null
+++ b/build/pypng/pnghist
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pnghist $
+# $Rev: 153 $
+# PNG Histogram
+# Only really works on grayscale images.
+
+from array import array
+import getopt
+
+import png
+
def decidemax(level):
    """Given a list of levels, decide the maximum value to use for the
    histogram.  This is normally chosen to be a bit bigger than the 99th
    percentile, but if the 100th percentile is not much more (within a
    factor of 2) then the 100th percentile is chosen.
    """

    truemax = max(level)
    # Rank the levels from largest to smallest; index i99 is the 99th
    # percentile entry.
    ranked = sorted(level, reverse=True)
    i99 = int(round(len(level) * 0.01))
    p99 = ranked[i99]
    if truemax <= 2 * p99:
        return truemax
    return 1.05 * p99
+
+def hist(out, inp, verbose=None):
+    """Open the PNG file `inp` and generate a histogram: a PNG image,
+    written to file object `out`, with one column per sample value and
+    bar heights scaled to the value chosen by decidemax().  If `verbose`
+    is a file object the raw counts are printed to it."""
+
+    r = png.Reader(file=inp)
+    x,y,pixels,info = r.asDirect()
+    bitdepth = info['bitdepth']
+    # level[v] counts occurrences of sample value v.
+    level = [0]*2**bitdepth
+    for row in pixels:
+        for v in row:
+            level[v] += 1
+    maxlevel = decidemax(level)
+
+    # Output image: h rows tall, 8-bit greyscale.
+    h = 100
+    outbitdepth = 8
+    outmaxval = 2**outbitdepth - 1
+    def genrow():
+        # Draw the bars top-down; row y covers counts in [fl, ce).
+        for y in range(h):
+            y = h-y-1
+            # :todo: vary typecode according to outbitdepth
+            row = array('B', [0]*len(level))
+            fl = y*maxlevel/float(h)
+            ce = (y+1)*maxlevel/float(h)
+            for x in range(len(row)):
+                if level[x] <= fl:
+                    # Relies on row being initialised to all 0
+                    continue
+                if level[x] >= ce:
+                    row[x] = outmaxval
+                    continue
+                # Bar tip: anti-alias with a proportional grey.
+                frac = (level[x] - fl)/(ce - fl)
+                row[x] = int(round(outmaxval*frac))
+            yield row
+    w = png.Writer(len(level), h, gamma=1.0,
+        greyscale=True, alpha=False, bitdepth=outbitdepth)
+    w.write(out, genrow())
+    if verbose: print >>verbose, level
+
def main(argv=None):
    """Command line entry point: histogram the named PNG file (or
    stdin) to stdout."""
    import sys

    if argv is None:
        argv = sys.argv
    argv = argv[1:]
    opt,arg = getopt.getopt(argv, '')

    if len(arg) < 1:
        f = sys.stdin
    else:
        # Bug fix: PNG is a binary format; open in binary mode so line
        # end translation cannot corrupt the data (and for consistency
        # with the sibling pypng tools).
        f = open(arg[0], 'rb')
    return hist(sys.stdout, f)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/pnglsch b/build/pypng/pnglsch
new file mode 100644
index 0000000..d10d238
--- /dev/null
+++ b/build/pypng/pnglsch
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/pnglsch $
+# $Rev: 107 $
+# pnglsch
+# PNG List Chunks
+
+import png
+
+def list(out, inp):
+    """Print to file object `out` one summary line per chunk of the PNG
+    read from file object `inp`: chunk type, length, and (for chunks of
+    28 bytes or fewer) a hex dump of the content.
+
+    NOTE(review): shadows the builtin `list`; harmless here because the
+    module never uses the builtin, but renaming would be kinder.
+    """
+
+    r = png.Reader(file=inp)
+    for t,v in r.chunks():
+        add = ''
+        if len(v) <= 28:
+            # Python 2 only: str.encode('hex') is the hex codec.
+            add = ' ' + v.encode('hex')
+        print >>out, "%s %10d%s" % (t, len(v), add)
+
def main(argv=None):
    """Command line entry point: list the chunks of the named PNG file
    (or stdin) on stdout."""
    import sys

    if argv is None:
        argv = sys.argv
    args = argv[1:]

    source = open(args[0], 'rb') if args else sys.stdin
    return list(sys.stdout, source)

if __name__ == '__main__':
    main()
diff --git a/build/pypng/texttopng b/build/pypng/texttopng
new file mode 100644
index 0000000..ab0c690
--- /dev/null
+++ b/build/pypng/texttopng
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+# $URL: http://pypng.googlecode.com/svn/trunk/code/texttopng $
+# $Rev: 132 $
+# Script to render text as a PNG image.
+
+from array import array
+import itertools
+
+# Bitmap font table: maps an ASCII code point to 16 hex digits encoding
+# an 8x8 glyph -- one byte per row, top row first, most significant bit
+# leftmost.  Code points absent from the table render as blank cells
+# (see char()).
+font = {
+    32: '0000000000000000',
+    33: '0010101010001000',
+    34: '0028280000000000',
+    35: '0000287c287c2800',
+    36: '00103c5038147810',
+    37: '0000644810244c00',
+    38: '0020502054483400',
+    39: '0010100000000000',
+    40: '0008101010101008',
+    41: '0020101010101020',
+    42: '0010543838541000',
+    43: '000010107c101000',
+    44: '0000000000301020',
+    45: '000000007c000000',
+    46: '0000000000303000',
+    47: '0000040810204000',
+    48: '0038445454443800',
+    49: '0008180808080800',
+    50: '0038043840407c00',
+    51: '003c041804043800',
+    52: '00081828487c0800',
+    53: '0078407804047800',
+    54: '0038407844443800',
+    55: '007c040810101000',
+    56: '0038443844443800',
+    57: '0038443c04040400',
+    58: '0000303000303000',
+    59: '0000303000301020',
+    60: '0004081020100804',
+    61: '0000007c007c0000',
+    62: '0040201008102040',
+    63: '0038440810001000',
+    64: '00384c545c403800',
+    65: '0038447c44444400',
+    66: '0078447844447800',
+    67: '0038444040443800',
+    68: '0070484444487000',
+    69: '007c407840407c00',
+    70: '007c407840404000',
+    71: '003844405c443c00',
+    72: '0044447c44444400',
+    73: '0038101010103800',
+    74: '003c040404443800',
+    75: '0044487048444400',
+    76: '0040404040407c00',
+    77: '006c545444444400',
+    78: '004464544c444400',
+    79: '0038444444443800',
+    80: '0078447840404000',
+    81: '0038444444443c02',
+    82: '0078447844444400',
+    83: '0038403804047800',
+    84: '007c101010101000',
+    85: '0044444444443c00',
+    86: '0044444444281000',
+    87: '0044445454543800',
+    88: '0042241818244200',
+    89: '0044443810101000',
+    90: '007c081020407c00',
+    91: '0038202020202038',
+    92: '0000402010080400',
+    93: '0038080808080838',
+    94: '0010284400000000',
+    95: '000000000000fe00',
+    96: '0040200000000000',
+    97: '000038043c443c00',
+    98: '0040784444447800',
+    99: '0000384040403800',
+    100: '00043c4444443c00',
+    101: '000038447c403c00',
+    102: '0018203820202000',
+    103: '00003c44443c0438',
+    104: '0040784444444400',
+    105: '0010003010101000',
+    106: '0010003010101020',
+    107: '0040404870484400',
+    108: '0030101010101000',
+    109: '0000385454444400',
+    110: '0000784444444400',
+    111: '0000384444443800',
+    112: '0000784444784040',
+    113: '00003c44443c0406',
+    114: '00001c2020202000',
+    115: '00003c4038047800',
+    116: '0020203820201800',
+    117: '0000444444443c00',
+    118: '0000444444281000',
+    119: '0000444454543800',
+    120: '0000442810284400',
+    121: '00004444443c0438',
+    122: '00007c0810207c00',
+    123: '0018202060202018',
+    124: '0010101000101010',
+    125: '003008080c080830',
+    126: '0020540800000000',
+}
+
+def char(i):
+    """Get image data for the character `i` (a one character string).
+    Returned as a list of rows.  Each row is a tuple containing the
+    packed pixels.  Characters without a glyph in `font` render as a
+    blank 8x8 cell.
+    """
+
+    i = ord(i)
+    if i not in font:
+        return [(0,)]*8
+    # Python 2 only: str.decode('hex') turns the 16 hex digits into 8
+    # bytes, one per glyph row.
+    return map(lambda row: (ord(row),), font[i].decode('hex'))
+
+def texttoraster(m):
+    """Convert string *m* to a raster image (by rendering it using the
+    font in *font*).  A triple of (*width*, *height*, *pixels*) is
+    returned; *pixels* is in boxed row packed pixel format.
+    """
+
+    # Assumes monospaced font.
+    x = 8*len(m)
+    y = 8
+    # zip(*...) transposes the per-character row lists so that each
+    # yielded item is one raster row spanning all characters; Python 2
+    # itertools.imap keeps the result lazy.
+    return x,y,itertools.imap(lambda row: itertools.chain(*row),
+      zip(*map(char, m)))
+
+
def render(message, out):
    """Render `message` with the built-in 8x8 font and write it to file
    object `out` as a 1-bit greyscale PNG."""
    import png

    width, height, pixels = texttoraster(message)
    writer = png.Writer(width, height, greyscale=True, bitdepth=1)
    writer.write_packed(out, pixels)
    out.flush()
+
def main(argv=None):
    """Command line entry point: render the first argument (or, with no
    arguments, all of stdin) to stdout as a PNG."""
    import sys

    if argv is None:
        argv = sys.argv
    message = argv[1] if len(argv) > 1 else sys.stdin.read()
    render(message, sys.stdout)

if __name__ == '__main__':
    main()
diff --git a/client.mk b/client.mk
new file mode 100644
index 0000000..cb6ec1d
--- /dev/null
+++ b/client.mk
@@ -0,0 +1,466 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Build a comm application (Mozilla calendar, mail or suite).
+#
+# To build a tree,
+# 1. hg clone http://hg.mozilla.org/comm-central comm
+# 2. cd comm
+# 3. python client.py checkout
+# 4. create your .mozconfig file with
+# ac_add_options --enable-application=suite
+# (or mail, or calendar)
+# 5. gmake -f client.mk
+#
+# Other targets (gmake -f client.mk [targets...]),
+# build
+# clean
+# distclean
+#
+# See http://developer.mozilla.org/en/Build_Documentation for
+# more information.
+#
+# Options:
+# MOZ_BUILD_PROJECTS - Build multiple projects in subdirectories
+# of MOZ_OBJDIR
+# MOZ_OBJDIR - Destination object directory
+# MOZ_MAKE_FLAGS - Flags to pass to $(MAKE)
+# MOZ_PREFLIGHT_ALL } - Makefiles to run before any project in
+# MOZ_PREFLIGHT } MOZ_BUILD_PROJECTS, before each project, after
+# MOZ_POSTFLIGHT } each project, and after all projects; these
+# MOZ_POSTFLIGHT_ALL } variables contain space-separated lists
+# MOZ_UNIFY_BDATE - Set to use the same bdate for each project in
+# MOZ_BUILD_PROJECTS
+#
+#######################################################################
+# Defines
+
+comma := ,
+
+CWD := $(CURDIR)
+ifneq (1,$(words $(CWD)))
+$(error The platform directory cannot be located in a path with spaces.)
+endif
+
+ifeq "$(CWD)" "/"
+CWD := /.
+endif
+
+ifndef TOPSRCDIR
+ifeq (,$(wildcard client.mk))
+TOPSRCDIR := $(patsubst %/,%,$(dir $(MAKEFILE_LIST)))
+else
+TOPSRCDIR := $(CWD)
+endif
+endif
+
+SH := /bin/sh
+PERL ?= perl
+PYTHON ?= $(shell which python2.7 > /dev/null 2>&1 && echo python2.7 || echo python)
+
+CONFIG_GUESS_SCRIPT := $(wildcard $(TOPSRCDIR)/build/autoconf/config.guess)
+ifdef CONFIG_GUESS_SCRIPT
+ CONFIG_GUESS := $(shell $(CONFIG_GUESS_SCRIPT))
+endif
+
+
+####################################
+# Sanity checks
+
+# Windows checks.
+ifneq (,$(findstring mingw,$(CONFIG_GUESS)))
+
+# check for CRLF line endings
+ifneq (0,$(shell $(PERL) -e 'binmode(STDIN); while (<STDIN>) { if (/\r/) { print "1"; exit } } print "0"' < $(TOPSRCDIR)/client.mk))
+$(error This source tree appears to have Windows-style line endings. To \
+convert it to Unix-style line endings, check \
+"https://developer.mozilla.org/en-US/docs/Developer_Guide/Mozilla_build_FAQ\#Win32-specific_questions" \
+for a workaround of this issue.)
+endif
+endif
+
+####################################
+# Load mozconfig Options
+
+# See build pages, http://www.mozilla.org/build/ for how to set up mozconfig.
+
+MOZCONFIG_LOADER := build/autoconf/mozconfig2client-mk
+
+define CR
+
+
+endef
+
+# As $(shell) doesn't preserve newlines, use sed to replace them with an
+# unlikely sequence (||), which is then replaced back to newlines by make
+# before evaluation. $(shell) replacing newlines with spaces, || is always
+# followed by a space (since sed doesn't remove newlines), except on the
+# last line, so replace both '|| ' and '||'.
+# Also, make MOZ_PGO available to mozconfig when passed on make command line.
+MOZCONFIG_CONTENT := $(subst ||,$(CR),$(subst || ,$(CR),$(shell MOZ_PGO=$(MOZ_PGO) $(TOPSRCDIR)/$(MOZCONFIG_LOADER) $(TOPSRCDIR) | sed 's/$$/||/')))
+$(eval $(MOZCONFIG_CONTENT))
+
+export FOUND_MOZCONFIG
+
+# As '||' was used as a newline separator, it means it's not occurring in
+# lines themselves. It can thus safely be used to replace normal spaces,
+# to then replace newlines with normal spaces. This allows us to get a list
+# of mozconfig output lines.
+MOZCONFIG_OUT_LINES := $(subst $(CR), ,$(subst $(NULL) $(NULL),||,$(MOZCONFIG_CONTENT)))
+# Filter-out comments from those lines.
+START_COMMENT = \#
+MOZCONFIG_OUT_FILTERED := $(filter-out $(START_COMMENT)%,$(MOZCONFIG_OUT_LINES))
+
+ifdef MOZ_PGO
+export MOZ_PGO
+endif
+
+# Automatically add -jN to make flags if not defined. N defaults to number of cores.
+ifeq (,$(findstring -j,$(MOZ_MAKE_FLAGS)))
+ cores=$(shell $(PYTHON) -c 'import multiprocessing; print(multiprocessing.cpu_count())')
+ MOZ_MAKE_FLAGS += -j$(cores)
+endif
+
+
+ifndef MOZ_OBJDIR
+ MOZ_OBJDIR = obj-$(CONFIG_GUESS)
+else
+# On Windows Pymake builds check MOZ_OBJDIR doesn't start with "/"
+ ifneq (,$(findstring mingw,$(CONFIG_GUESS)))
+ ifeq (1_a,$(.PYMAKE)_$(firstword a$(subst /, ,$(MOZ_OBJDIR))))
+ $(error For Windows Pymake builds, MOZ_OBJDIR must be a Windows [and not MSYS] style path.)
+ endif
+ endif
+endif
+
+ifdef MOZ_BUILD_PROJECTS
+
+ifdef MOZ_CURRENT_PROJECT
+ OBJDIR = $(MOZ_OBJDIR)/$(MOZ_CURRENT_PROJECT)
+ MOZ_MAKE = $(MAKE) $(MOZ_MAKE_FLAGS) -C $(OBJDIR)
+ BUILD_PROJECT_ARG = MOZ_BUILD_APP=$(MOZ_CURRENT_PROJECT)
+else
+ OBJDIR = $(error Cannot find the OBJDIR when MOZ_CURRENT_PROJECT is not set.)
+ MOZ_MAKE = $(error Cannot build in the OBJDIR when MOZ_CURRENT_PROJECT is not set.)
+endif
+
+else # MOZ_BUILD_PROJECTS
+
+OBJDIR = $(MOZ_OBJDIR)
+MOZ_MAKE = $(MAKE) $(MOZ_MAKE_FLAGS) -C $(OBJDIR)
+
+endif # MOZ_BUILD_PROJECTS
+
+# If we have a MOZ_OBJDIR that's set from the environment, ensure that it is an
+# absolute path.
+ifdef MOZ_OBJDIR
+MOZ_OBJDIR := $(shell $(PYTHON) -c "import os.path; print(os.path.join(\"$(TOPSRCDIR)\", \"$(MOZ_OBJDIR)\").replace('\\\\','/'))")
+endif
+
+# 'configure' scripts generated by autoconf.
+CONFIGURES := $(TOPSRCDIR)/configure
+CONFIGURES += $(TOPSRCDIR)/platform/configure
+CONFIGURES += $(TOPSRCDIR)/platform/js/src/configure
+
+# Make targets that are going to be passed to the real build system
+OBJDIR_TARGETS = install export libs clean realclean distclean maybe_clobber_profiledbuild upload sdk installer package package-compare stage-package source-package l10n-check automation/build
+
+#######################################################################
+# Rules
+
+# The default rule is build
+build::
+
+# Define mkdir
+include $(TOPSRCDIR)/config/makefiles/makeutils.mk
+include $(TOPSRCDIR)/config/makefiles/autotargets.mk
+
+# Create a makefile containing the mk_add_options values from mozconfig,
+# but only do so when OBJDIR is defined (see further above).
+ifdef MOZ_BUILD_PROJECTS
+ifdef MOZ_CURRENT_PROJECT
+WANT_MOZCONFIG_MK = 1
+else
+WANT_MOZCONFIG_MK =
+endif
+else
+WANT_MOZCONFIG_MK = 1
+endif
+
+ifdef WANT_MOZCONFIG_MK
+# For now, only output "export" lines from mozconfig2client-mk output.
+MOZCONFIG_MK_LINES := $(filter export||%,$(MOZCONFIG_OUT_LINES))
+$(OBJDIR)/.mozconfig.mk: $(FOUND_MOZCONFIG) $(call mkdir_deps,$(OBJDIR))
+ $(if $(MOZCONFIG_MK_LINES),( $(foreach line,$(MOZCONFIG_MK_LINES), echo "$(subst ||, ,$(line))";) )) > $@
+ifdef MOZ_CURRENT_PROJECT
+ echo export MOZ_CURRENT_PROJECT=$(MOZ_CURRENT_PROJECT) >> $@
+endif
+
+# Include that makefile so that it is created. This should not actually change
+# the environment since MOZCONFIG_CONTENT, which MOZCONFIG_OUT_LINES derives
+# from, has already been eval'ed.
+include $(OBJDIR)/.mozconfig.mk
+endif
+
+# UPLOAD_EXTRA_FILES is appended to and exported from mozconfig, which makes
+# submakes as well as configure add even more to that, so just unexport it
+# for submakes to pick it from .mozconfig.mk and for configure to pick it
+# from mach environment.
+unexport UPLOAD_EXTRA_FILES
+
+# These targets are candidates for auto-running client.py
+
+ifeq (01,$(MAKELEVEL)$(if $(ALWAYS_RUN_CLIENT_PY),1,))
+
+build profiledbuild configure:: run_client_py
+ $(MAKE) -f $(TOPSRCDIR)/client.mk $@
+else
+
+
+# Print out any options loaded from mozconfig.
+all build clean distclean export libs install realclean::
+ifneq (,$(strip $(MOZCONFIG_OUT_FILTERED)))
+ $(info Adding client.mk options from $(FOUND_MOZCONFIG):)
+ $(foreach line,$(MOZCONFIG_OUT_FILTERED),$(info $(NULL) $(NULL) $(NULL) $(NULL) $(subst ||, ,$(line))))
+endif
+
+# Windows equivalents
+build_all: build
+clobber clobber_all: clean
+
+# Do everything from scratch
+everything: clean build
+
+####################################
+# Profile-Guided Optimization
+# To use this, you should set the following variables in your mozconfig
+# mk_add_options PROFILE_GEN_SCRIPT=/path/to/profile-script
+#
+# The profile script should exercise the functionality to be included
+# in the profile feedback.
+#
+# This is up here, outside of the MOZ_CURRENT_PROJECT logic so that this
+# is usable in multi-pass builds, where you might not have a runnable
+# application until all the build passes and postflight scripts have run.
+ifdef MOZ_OBJDIR
+ PGO_OBJDIR = $(MOZ_OBJDIR)
+else
+ PGO_OBJDIR := $(TOPSRCDIR)
+endif
+
+profiledbuild::
+ $(MAKE) -f $(TOPSRCDIR)/client.mk build MOZ_PROFILE_GENERATE=1
+ $(MAKE) -C $(PGO_OBJDIR) stage-package
+ OBJDIR=${PGO_OBJDIR} $(PROFILE_GEN_SCRIPT)
+ $(MAKE) -f $(TOPSRCDIR)/client.mk maybe_clobber_profiledbuild
+ $(MAKE) -f $(TOPSRCDIR)/client.mk build MOZ_PROFILE_USE=1
+
+#####################################################
+# Build date unification
+
+ifdef MOZ_UNIFY_BDATE
+ifndef MOZ_BUILD_DATE
+ifdef MOZ_BUILD_PROJECTS
+MOZ_BUILD_DATE = $(shell $(PYTHON) $(TOPSRCDIR)/platform/build/variables.py buildid_header | awk '{print $$3}')
+export MOZ_BUILD_DATE
+endif
+endif
+endif
+
+#####################################################
+# Preflight, before building any project
+
+build preflight_all::
+ifeq (,$(MOZ_CURRENT_PROJECT)$(if $(MOZ_PREFLIGHT_ALL),,1))
+# Don't run preflight_all for individual projects in multi-project builds
+# (when MOZ_CURRENT_PROJECT is set.)
+ifndef MOZ_BUILD_PROJECTS
+# Building a single project, OBJDIR is usable.
+ set -e; \
+ for mkfile in $(MOZ_PREFLIGHT_ALL); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile preflight_all TOPSRCDIR=$(TOPSRCDIR) OBJDIR=$(OBJDIR) MOZ_OBJDIR=$(MOZ_OBJDIR); \
+ done
+else
+# OBJDIR refers to the project-specific OBJDIR, which is not available at
+# this point when building multiple projects. Only MOZ_OBJDIR is available.
+ set -e; \
+ for mkfile in $(MOZ_PREFLIGHT_ALL); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile preflight_all TOPSRCDIR=$(TOPSRCDIR) MOZ_OBJDIR=$(MOZ_OBJDIR) MOZ_BUILD_PROJECTS='$(MOZ_BUILD_PROJECTS)'; \
+ done
+endif
+endif
+
+# If we're building multiple projects, but haven't specified which project,
+# loop through them.
+
+ifeq (,$(MOZ_CURRENT_PROJECT)$(if $(MOZ_BUILD_PROJECTS),,1))
+configure build preflight postflight $(OBJDIR_TARGETS)::
+ set -e; \
+ for app in $(MOZ_BUILD_PROJECTS); do \
+ $(MAKE) -f $(TOPSRCDIR)/client.mk $@ MOZ_CURRENT_PROJECT=$$app; \
+ done
+
+else
+
+# MOZ_CURRENT_PROJECT: either doing a single-project build, or building an
+# individual project in a multi-project build.
+
+####################################
+# Configure
+
+MAKEFILE = $(wildcard $(OBJDIR)/Makefile)
+CONFIG_STATUS = $(wildcard $(OBJDIR)/config.status)
+CONFIG_CACHE = $(wildcard $(OBJDIR)/config.cache)
+
+EXTRA_CONFIG_DEPS := \
+ $(TOPSRCDIR)/aclocal.m4 \
+ $(TOPSRCDIR)/platform/aclocal.m4 \
+ $(TOPSRCDIR)/platform/old-configure.in \
+ $(wildcard $(TOPSRCDIR)/platform/build/autoconf/*.m4) \
+ $(TOPSRCDIR)/platform/js/src/aclocal.m4 \
+ $(TOPSRCDIR)/platform/js/src/old-configure.in \
+ $(NULL)
+
+$(CONFIGURES): %: %.in $(EXTRA_CONFIG_DEPS)
+ @echo Generating $@
+ sed '1,/^divert/d' $< > $@
+ chmod +x $@
+
+CONFIG_STATUS_DEPS := \
+ $(wildcard $(TOPSRCDIR)/*/confvars.sh) \
+ $(wildcard $(TOPSRCDIR)/*/configure.in) \
+ $(wildcard $(TOPSRCDIR)/*/config/version.txt) \
+ $(wildcard $(CONFIGURES)) \
+ $(wildcard $(TOPSRCDIR)/platform/nsprpub/configure) \
+ $(wildcard $(TOPSRCDIR)/platform/config/milestone.txt) \
+ $(wildcard $(TOPSRCDIR)/platform/ldap/sdks/c-sdk/configure) \
+ $(wildcard $(addsuffix confvars.sh,$(wildcard $(TOPSRCDIR)/*/))) \
+ $(NULL)
+
+CONFIGURE_ENV_ARGS += \
+ MAKE='$(MAKE)' \
+ $(NULL)
+
+# configure uses the program name to determine @srcdir@. Calling it without
+# $(TOPSRCDIR) will set @srcdir@ to '.'; otherwise, it is set to the full
+# path of $(TOPSRCDIR).
+ifeq ($(TOPSRCDIR),$(OBJDIR))
+ CONFIGURE = ./configure
+else
+ CONFIGURE = $(TOPSRCDIR)/configure
+endif
+
+configure-files: $(CONFIGURES)
+
+configure-preqs = \
+ configure-files \
+ $(call mkdir_deps,$(OBJDIR)) \
+ $(if $(MOZ_BUILD_PROJECTS),$(call mkdir_deps,$(MOZ_OBJDIR))) \
+ $(NULL)
+
+configure:: $(configure-preqs)
+ @echo cd $(OBJDIR);
+ @echo $(CONFIGURE) $(CONFIGURE_ARGS)
+ @cd $(OBJDIR) && $(BUILD_PROJECT_ARG) $(CONFIGURE_ENV_ARGS) $(CONFIGURE) $(CONFIGURE_ARGS) \
+ || ( echo '*** Fix above errors and then restart with\
+ "$(MAKE) -f client.mk build"' && exit 1 )
+ @touch $(OBJDIR)/Makefile
+
+ifneq (,$(MAKEFILE))
+$(OBJDIR)/Makefile: $(OBJDIR)/config.status
+
+$(OBJDIR)/config.status: $(CONFIG_STATUS_DEPS)
+else
+$(OBJDIR)/Makefile: $(CONFIG_STATUS_DEPS)
+endif
+ @$(MAKE) -f $(TOPSRCDIR)/client.mk configure
+
+ifneq (,$(CONFIG_STATUS))
+$(OBJDIR)/config/autoconf.mk: $(TOPSRCDIR)/config/autoconf.mk.in
+ $(PYTHON) $(OBJDIR)/config.status -n --file=$(OBJDIR)/config/autoconf.mk
+endif
+
+
+####################################
+# Preflight
+
+build preflight::
+ifdef MOZ_PREFLIGHT
+ set -e; \
+ for mkfile in $(MOZ_PREFLIGHT); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile preflight TOPSRCDIR=$(TOPSRCDIR) OBJDIR=$(OBJDIR) MOZ_OBJDIR=$(MOZ_OBJDIR); \
+ done
+endif
+
+####################################
+# Build it
+
+build:: $(OBJDIR)/Makefile $(OBJDIR)/config.status
+ +$(MOZ_MAKE)
+
+####################################
+# Other targets
+
+# Pass these targets on to the real build system
+$(OBJDIR_TARGETS):: $(OBJDIR)/Makefile $(OBJDIR)/config.status
+ +$(MOZ_MAKE) $@
+
+####################################
+# Postflight
+
+build postflight::
+ifdef MOZ_POSTFLIGHT
+ set -e; \
+ for mkfile in $(MOZ_POSTFLIGHT); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile postflight TOPSRCDIR=$(TOPSRCDIR) OBJDIR=$(OBJDIR) MOZ_OBJDIR=$(MOZ_OBJDIR); \
+ done
+endif
+
+endif # MOZ_CURRENT_PROJECT
+endif # RAN_CLIENT_PY
+
+####################################
+# Postflight, after building all projects
+
+build postflight_all::
+ifeq (,$(MOZ_CURRENT_PROJECT)$(if $(MOZ_POSTFLIGHT_ALL),,1))
+# Don't run postflight_all for individual projects in multi-project builds
+# (when MOZ_CURRENT_PROJECT is set.)
+ifndef MOZ_BUILD_PROJECTS
+# Building a single project, OBJDIR is usable.
+ set -e; \
+ for mkfile in $(MOZ_POSTFLIGHT_ALL); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile postflight_all TOPSRCDIR=$(TOPSRCDIR) OBJDIR=$(OBJDIR) MOZ_OBJDIR=$(MOZ_OBJDIR); \
+ done
+else
+# OBJDIR refers to the project-specific OBJDIR, which is not available at
+# this point when building multiple projects. Only MOZ_OBJDIR is available.
+ set -e; \
+ for mkfile in $(MOZ_POSTFLIGHT_ALL); do \
+ $(MAKE) -f $(TOPSRCDIR)/$$mkfile postflight_all TOPSRCDIR=$(TOPSRCDIR) MOZ_OBJDIR=$(MOZ_OBJDIR) MOZ_BUILD_PROJECTS='$(MOZ_BUILD_PROJECTS)'; \
+ done
+endif
+endif
+
+cleansrcdir:
+ @cd $(TOPSRCDIR); \
+ if [ -f Makefile ]; then \
+ $(MAKE) distclean ; \
+ else \
+ echo 'Removing object files from srcdir...'; \
+ rm -fr `find . -type d \( -name .deps -print -o -name CVS \
+ -o -exec test ! -d {}/CVS \; \) -prune \
+ -o \( -name '*.[ao]' -o -name '*.so' \) -type f -print`; \
+ build/autoconf/clean-config.sh; \
+ fi;
+
+echo-variable-%:
+ @echo $($*)
+
+# This makefile doesn't support parallel execution. It does pass
+# MOZ_MAKE_FLAGS to sub-make processes, so they will correctly execute
+# in parallel.
+.NOTPARALLEL:
+
+.PHONY: checkout co real_checkout build profiledbuild cleansrcdir pull_all build_all clobber clobber_all pull_and_build_all everything configure preflight_all preflight postflight postflight_all $(OBJDIR_TARGETS)
diff --git a/config/baseconfig.mk b/config/baseconfig.mk
new file mode 100644
index 0000000..ffc2e08
--- /dev/null
+++ b/config/baseconfig.mk
@@ -0,0 +1,16 @@
+# This file is normally included by autoconf.mk, but it is also used
+# directly in python/mozbuild/mozbuild/base.py for gmake validation.
+# We thus use INCLUDED_AUTOCONF_MK to enable/disable some parts depending
+# whether a normal build is happening or whether the check is running.
+
+# When mach wants to know if we're to use mozmake, it runs:
+# make -f topsrcdir/config/baseconfig.mk
+# The first word of MAKEFILE_LIST is the main file we're running. Grabbing the
+# parent of that directory therefore gets us the topsrcdir of comm-central,
+# whence we get the mozilla directory to run the "real" baseconfig.mk logic.
+ifndef INCLUDED_AUTOCONF_MK
+topsrcdir := $(dir $(firstword $(MAKEFILE_LIST)))..
+endif
+
+MOZILLA_SRCDIR = $(topsrcdir)/platform
+include $(MOZILLA_SRCDIR)/config/baseconfig.mk
diff --git a/config/config.mk b/config/config.mk
new file mode 100644
index 0000000..c65455b
--- /dev/null
+++ b/config/config.mk
@@ -0,0 +1,7 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Just use mozilla-central's copy of config.mk now.
+include $(MOZILLA_DIR)/config/config.mk
diff --git a/config/configobj.py b/config/configobj.py
new file mode 100644
index 0000000..97b252c
--- /dev/null
+++ b/config/configobj.py
@@ -0,0 +1,2279 @@
+# configobj.py
+# A config file reader/writer that supports nested sections in config files.
+# Copyright (C) 2005-2006 Michael Foord, Nicola Larosa
+# E-mail: fuzzyman AT voidspace DOT org DOT uk
+# nico AT tekNico DOT net
+
+# ConfigObj 4
+# http://www.voidspace.org.uk/python/configobj.html
+
+# Released subject to the BSD License
+# Please see http://www.voidspace.org.uk/python/license.shtml
+
+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
+# For information about bugfixes, updates and support, please join the
+# ConfigObj mailing list:
+# http://lists.sourceforge.net/lists/listinfo/configobj-develop
+# Comments, suggestions and bug reports welcome.
+
+from __future__ import generators
+
+import sys
+INTP_VER = sys.version_info[:2]
+if INTP_VER < (2, 2):
+ raise RuntimeError("Python v.2.2 or later needed")
+
+import os, re
+compiler = None
+try:
+ import compiler
+except ImportError:
+ # for IronPython
+ pass
+from types import StringTypes
+from warnings import warn
+try:
+ from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
+except ImportError:
+ # Python 2.2 does not have these
+ # UTF-8
+ BOM_UTF8 = '\xef\xbb\xbf'
+ # UTF-16, little endian
+ BOM_UTF16_LE = '\xff\xfe'
+ # UTF-16, big endian
+ BOM_UTF16_BE = '\xfe\xff'
+ if sys.byteorder == 'little':
+ # UTF-16, native endianness
+ BOM_UTF16 = BOM_UTF16_LE
+ else:
+ # UTF-16, native endianness
+ BOM_UTF16 = BOM_UTF16_BE
+
+# A dictionary mapping BOM to
+# the encoding to decode with, and what to set the
+# encoding attribute to.
+BOMS = {
+ BOM_UTF8: ('utf_8', None),
+ BOM_UTF16_BE: ('utf16_be', 'utf_16'),
+ BOM_UTF16_LE: ('utf16_le', 'utf_16'),
+ BOM_UTF16: ('utf_16', 'utf_16'),
+ }
+# All legal variants of the BOM codecs.
+# TODO: the list of aliases is not meant to be exhaustive; is there a
+# better way?
+BOM_LIST = {
+ 'utf_16': 'utf_16',
+ 'u16': 'utf_16',
+ 'utf16': 'utf_16',
+ 'utf-16': 'utf_16',
+ 'utf16_be': 'utf16_be',
+ 'utf_16_be': 'utf16_be',
+ 'utf-16be': 'utf16_be',
+ 'utf16_le': 'utf16_le',
+ 'utf_16_le': 'utf16_le',
+ 'utf-16le': 'utf16_le',
+ 'utf_8': 'utf_8',
+ 'u8': 'utf_8',
+ 'utf': 'utf_8',
+ 'utf8': 'utf_8',
+ 'utf-8': 'utf_8',
+ }
+
+# Map of encodings to the BOM to write.
+BOM_SET = {
+ 'utf_8': BOM_UTF8,
+ 'utf_16': BOM_UTF16,
+ 'utf16_be': BOM_UTF16_BE,
+ 'utf16_le': BOM_UTF16_LE,
+ None: BOM_UTF8
+ }
+
+try:
+ from validate import VdtMissingValue
+except ImportError:
+ VdtMissingValue = None
+
+try:
+ enumerate
+except NameError:
+ def enumerate(obj):
+ """enumerate for Python 2.2."""
+ i = -1
+ for item in obj:
+ i += 1
+ yield i, item
+
+try:
+ True, False
+except NameError:
+ True, False = 1, 0
+
+
+__version__ = '4.4.0'
+
+__revision__ = '$Id: configobj.py,v 3.5 2007/07/02 18:20:24 benjamin%smedbergs.us Exp $'
+
+__docformat__ = "restructuredtext en"
+
+__all__ = (
+ '__version__',
+ 'DEFAULT_INDENT_TYPE',
+ 'DEFAULT_INTERPOLATION',
+ 'ConfigObjError',
+ 'NestingError',
+ 'ParseError',
+ 'DuplicateError',
+ 'ConfigspecError',
+ 'ConfigObj',
+ 'SimpleVal',
+ 'InterpolationError',
+ 'InterpolationLoopError',
+ 'MissingInterpolationOption',
+ 'RepeatSectionError',
+ 'UnreprError',
+ 'UnknownType',
+ '__docformat__',
+ 'flatten_errors',
+)
+
+DEFAULT_INTERPOLATION = 'configparser'
+DEFAULT_INDENT_TYPE = ' '
+MAX_INTERPOL_DEPTH = 10
+
+OPTION_DEFAULTS = {
+ 'interpolation': True,
+ 'raise_errors': False,
+ 'list_values': True,
+ 'create_empty': False,
+ 'file_error': False,
+ 'configspec': None,
+ 'stringify': True,
+ # option may be set to one of ('', ' ', '\t')
+ 'indent_type': None,
+ 'encoding': None,
+ 'default_encoding': None,
+ 'unrepr': False,
+ 'write_empty_values': False,
+}
+
+
+def getObj(s):
+ s = "a=" + s
+ if compiler is None:
+ raise ImportError('compiler module not available')
+ p = compiler.parse(s)
+ return p.getChildren()[1].getChildren()[0].getChildren()[1]
+
+class UnknownType(Exception):
+ pass
+
+class Builder:
+
+ def build(self, o):
+ m = getattr(self, 'build_' + o.__class__.__name__, None)
+ if m is None:
+ raise UnknownType(o.__class__.__name__)
+ return m(o)
+
+ def build_List(self, o):
+ return map(self.build, o.getChildren())
+
+ def build_Const(self, o):
+ return o.value
+
+ def build_Dict(self, o):
+ d = {}
+ i = iter(map(self.build, o.getChildren()))
+ for el in i:
+ d[el] = i.next()
+ return d
+
+ def build_Tuple(self, o):
+ return tuple(self.build_List(o))
+
+ def build_Name(self, o):
+ if o.name == 'None':
+ return None
+ if o.name == 'True':
+ return True
+ if o.name == 'False':
+ return False
+
+        # An undefined Name
+ raise UnknownType('Undefined Name')
+
+ def build_Add(self, o):
+ real, imag = map(self.build_Const, o.getChildren())
+ try:
+ real = float(real)
+ except TypeError:
+ raise UnknownType('Add')
+ if not isinstance(imag, complex) or imag.real != 0.0:
+ raise UnknownType('Add')
+ return real+imag
+
+ def build_Getattr(self, o):
+ parent = self.build(o.expr)
+ return getattr(parent, o.attrname)
+
+ def build_UnarySub(self, o):
+ return -self.build_Const(o.getChildren()[0])
+
+ def build_UnaryAdd(self, o):
+ return self.build_Const(o.getChildren()[0])
+
+def unrepr(s):
+ if not s:
+ return s
+ return Builder().build(getObj(s))
+
+def _splitlines(instring):
+ """Split a string on lines, without losing line endings or truncating."""
+
+
+class ConfigObjError(SyntaxError):
+ """
+ This is the base class for all errors that ConfigObj raises.
+ It is a subclass of SyntaxError.
+ """
+ def __init__(self, message='', line_number=None, line=''):
+ self.line = line
+ self.line_number = line_number
+ self.message = message
+ SyntaxError.__init__(self, message)
+
+class NestingError(ConfigObjError):
+ """
+ This error indicates a level of nesting that doesn't match.
+ """
+
+class ParseError(ConfigObjError):
+ """
+ This error indicates that a line is badly written.
+ It is neither a valid ``key = value`` line,
+ nor a valid section marker line.
+ """
+
+class DuplicateError(ConfigObjError):
+ """
+ The keyword or section specified already exists.
+ """
+
+class ConfigspecError(ConfigObjError):
+ """
+ An error occurred whilst parsing a configspec.
+ """
+
+class InterpolationError(ConfigObjError):
+ """Base class for the two interpolation errors."""
+
+class InterpolationLoopError(InterpolationError):
+ """Maximum interpolation depth exceeded in string interpolation."""
+
+ def __init__(self, option):
+ InterpolationError.__init__(
+ self,
+ 'interpolation loop detected in value "%s".' % option)
+
+class RepeatSectionError(ConfigObjError):
+ """
+ This error indicates additional sections in a section with a
+ ``__many__`` (repeated) section.
+ """
+
+class MissingInterpolationOption(InterpolationError):
+ """A value specified for interpolation was missing."""
+
+ def __init__(self, option):
+ InterpolationError.__init__(
+ self,
+ 'missing option "%s" in interpolation.' % option)
+
+class UnreprError(ConfigObjError):
+ """An error parsing in unrepr mode."""
+
+
+class InterpolationEngine(object):
+ """
+ A helper class to help perform string interpolation.
+
+ This class is an abstract base class; its descendants perform
+ the actual work.
+ """
+
+ # compiled regexp to use in self.interpolate()
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+
+ def __init__(self, section):
+ # the Section instance that "owns" this engine
+ self.section = section
+
+ def interpolate(self, key, value):
+ def recursive_interpolate(key, value, section, backtrail):
+ """The function that does the actual work.
+
+ ``value``: the string we're trying to interpolate.
+ ``section``: the section in which that string was found
+ ``backtrail``: a dict to keep track of where we've been,
+ to detect and prevent infinite recursion loops
+
+ This is similar to a depth-first-search algorithm.
+ """
+ # Have we been here already?
+ if backtrail.has_key((key, section.name)):
+ # Yes - infinite loop detected
+ raise InterpolationLoopError(key)
+ # Place a marker on our backtrail so we won't come back here again
+ backtrail[(key, section.name)] = 1
+
+ # Now start the actual work
+ match = self._KEYCRE.search(value)
+ while match:
+ # The actual parsing of the match is implementation-dependent,
+ # so delegate to our helper function
+ k, v, s = self._parse_match(match)
+ if k is None:
+ # That's the signal that no further interpolation is needed
+ replacement = v
+ else:
+ # Further interpolation may be needed to obtain final value
+ replacement = recursive_interpolate(k, v, s, backtrail)
+ # Replace the matched string with its final value
+ start, end = match.span()
+ value = ''.join((value[:start], replacement, value[end:]))
+ new_search_start = start + len(replacement)
+ # Pick up the next interpolation key, if any, for next time
+ # through the while loop
+ match = self._KEYCRE.search(value, new_search_start)
+
+ # Now safe to come back here again; remove marker from backtrail
+ del backtrail[(key, section.name)]
+
+ return value
+
+ # Back in interpolate(), all we have to do is kick off the recursive
+ # function with appropriate starting values
+ value = recursive_interpolate(key, value, self.section, {})
+ return value
+
+ def _fetch(self, key):
+ """Helper function to fetch values from owning section.
+
+ Returns a 2-tuple: the value, and the section where it was found.
+ """
+ # switch off interpolation before we try and fetch anything !
+ save_interp = self.section.main.interpolation
+ self.section.main.interpolation = False
+
+ # Start at section that "owns" this InterpolationEngine
+ current_section = self.section
+ while True:
+ # try the current section first
+ val = current_section.get(key)
+ if val is not None:
+ break
+ # try "DEFAULT" next
+ val = current_section.get('DEFAULT', {}).get(key)
+ if val is not None:
+ break
+ # move up to parent and try again
+ # top-level's parent is itself
+ if current_section.parent is current_section:
+ # reached top level, time to give up
+ break
+ current_section = current_section.parent
+
+ # restore interpolation to previous value before returning
+ self.section.main.interpolation = save_interp
+ if val is None:
+ raise MissingInterpolationOption(key)
+ return val, current_section
+
+ def _parse_match(self, match):
+ """Implementation-dependent helper function.
+
+ Will be passed a match object corresponding to the interpolation
+ key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
+ key in the appropriate config file section (using the ``_fetch()``
+ helper function) and return a 3-tuple: (key, value, section)
+
+ ``key`` is the name of the key we're looking for
+ ``value`` is the value found for that key
+ ``section`` is a reference to the section where it was found
+
+ ``key`` and ``section`` should be None if no further
+ interpolation should be performed on the resulting value
+ (e.g., if we interpolated "$$" and returned "$").
+ """
+ raise NotImplementedError
+
+
+class ConfigParserInterpolation(InterpolationEngine):
+ """Behaves like ConfigParser."""
+ _KEYCRE = re.compile(r"%\(([^)]*)\)s")
+
+ def _parse_match(self, match):
+ key = match.group(1)
+ value, section = self._fetch(key)
+ return key, value, section
+
+
+class TemplateInterpolation(InterpolationEngine):
+ """Behaves like string.Template."""
+ _delimiter = '$'
+ _KEYCRE = re.compile(r"""
+ \$(?:
+ (?P<escaped>\$) | # Two $ signs
+ (?P<named>[_a-z][_a-z0-9]*) | # $name format
+ {(?P<braced>[^}]*)} # ${name} format
+ )
+ """, re.IGNORECASE | re.VERBOSE)
+
+ def _parse_match(self, match):
+ # Valid name (in or out of braces): fetch value from section
+ key = match.group('named') or match.group('braced')
+ if key is not None:
+ value, section = self._fetch(key)
+ return key, value, section
+ # Escaped delimiter (e.g., $$): return single delimiter
+ if match.group('escaped') is not None:
+ # Return None for key and section to indicate it's time to stop
+ return None, self._delimiter, None
+ # Anything else: ignore completely, just return it unchanged
+ return None, match.group(), None
+
+interpolation_engines = {
+ 'configparser': ConfigParserInterpolation,
+ 'template': TemplateInterpolation,
+}
+
+class Section(dict):
+ """
+ A dictionary-like object that represents a section in a config file.
+
+ It does string interpolation if the 'interpolation' attribute
+ of the 'main' object is set to True.
+
+ Interpolation is tried first from this object, then from the 'DEFAULT'
+ section of this object, next from the parent and its 'DEFAULT' section,
+ and so on until the main object is reached.
+
+ A Section will behave like an ordered dictionary - following the
+ order of the ``scalars`` and ``sections`` attributes.
+ You can use this to change the order of members.
+
+ Iteration follows the order: scalars, then sections.
+ """
+
+ def __init__(self, parent, depth, main, indict=None, name=None):
+ """
+ * parent is the section above
+ * depth is the depth level of this section
+ * main is the main ConfigObj
+ * indict is a dictionary to initialise the section with
+ """
+ if indict is None:
+ indict = {}
+ dict.__init__(self)
+ # used for nesting level *and* interpolation
+ self.parent = parent
+ # used for the interpolation attribute
+ self.main = main
+ # level of nesting depth of this Section
+ self.depth = depth
+ # the sequence of scalar values in this Section
+ self.scalars = []
+ # the sequence of sections in this Section
+ self.sections = []
+ # purely for information
+ self.name = name
+ # for comments :-)
+ self.comments = {}
+ self.inline_comments = {}
+ # for the configspec
+ self.configspec = {}
+ self._order = []
+ self._configspec_comments = {}
+ self._configspec_inline_comments = {}
+ self._cs_section_comments = {}
+ self._cs_section_inline_comments = {}
+ # for defaults
+ self.defaults = []
+ #
+ # we do this explicitly so that __setitem__ is used properly
+ # (rather than just passing to ``dict.__init__``)
+ for entry in indict:
+ self[entry] = indict[entry]
+
+ def _interpolate(self, key, value):
+ try:
+ # do we already have an interpolation engine?
+ engine = self._interpolation_engine
+ except AttributeError:
+ # not yet: first time running _interpolate(), so pick the engine
+ name = self.main.interpolation
+ if name == True: # note that "if name:" would be incorrect here
+ # backwards-compatibility: interpolation=True means use default
+ name = DEFAULT_INTERPOLATION
+ name = name.lower() # so that "Template", "template", etc. all work
+ class_ = interpolation_engines.get(name, None)
+ if class_ is None:
+ # invalid value for self.main.interpolation
+ self.main.interpolation = False
+ return value
+ else:
+ # save reference to engine so we don't have to do this again
+ engine = self._interpolation_engine = class_(self)
+ # let the engine do the actual work
+ return engine.interpolate(key, value)
+
def __getitem__(self, key):
    """Fetch *key*, applying string interpolation to string values."""
    value = dict.__getitem__(self, key)
    wants_interpolation = self.main.interpolation and isinstance(value,
        StringTypes)
    if wants_interpolation:
        return self._interpolate(key, value)
    return value
+
def __setitem__(self, key, value, unrepr=False):
    """
    Correctly set a value.

    Making dictionary values Section instances.
    (We have to special case 'Section' instances - which are also dicts)

    Keys must be strings.
    Values need only be strings (or lists of strings) if
    ``main.stringify`` is set.

    ``unrepr`` must be set when setting a value to a dictionary, without
    creating a new sub-section.
    """
    if not isinstance(key, StringTypes):
        raise ValueError, 'The key "%s" is not a string.' % key
    # add the comment
    if not self.comments.has_key(key):
        # first time we see this key: start with empty comment slots
        self.comments[key] = []
        self.inline_comments[key] = ''
    # remove the entry from defaults
    if key in self.defaults:
        self.defaults.remove(key)
    #
    if isinstance(value, Section):
        # already a Section: just track it as a section key
        if not self.has_key(key):
            self.sections.append(key)
        dict.__setitem__(self, key, value)
    elif isinstance(value, dict) and not unrepr:
        # First create the new depth level,
        # then create the section
        if not self.has_key(key):
            self.sections.append(key)
        new_depth = self.depth + 1
        dict.__setitem__(
            self,
            key,
            Section(
                self,
                new_depth,
                self.main,
                indict=value,
                name=key))
    else:
        # a scalar value (or a dict stored verbatim when ``unrepr``)
        if not self.has_key(key):
            self.scalars.append(key)
        if not self.main.stringify:
            # stringify off: only strings (or lists/tuples of them) allowed
            if isinstance(value, StringTypes):
                pass
            elif isinstance(value, (list, tuple)):
                for entry in value:
                    if not isinstance(entry, StringTypes):
                        raise TypeError, (
                            'Value is not a string "%s".' % entry)
            else:
                raise TypeError, 'Value is not a string "%s".' % value
        dict.__setitem__(self, key, value)
+
def __delitem__(self, key):
    """Delete *key*, keeping the scalar/section sequences in sync."""
    dict.__delitem__(self, key)
    if key in self.scalars:
        sequence = self.scalars
    else:
        sequence = self.sections
    sequence.remove(key)
    del self.comments[key]
    del self.inline_comments[key]
+
def get(self, key, default=None):
    """A ``get`` that does not bypass string interpolation."""
    if key not in self:
        return default
    return self[key]
+
def update(self, indict):
    """Update from *indict*, routing every assignment through our own
    ``__setitem__`` so bookkeeping stays consistent."""
    for key in indict:
        self[key] = indict[key]
+
def pop(self, key, *args):
    """
    Remove *key* and return its value (interpolated for strings).

    An optional extra positional argument is the default returned when
    the key is missing, exactly as for ``dict.pop``.
    """
    val = dict.pop(self, key, *args)
    # the two sequences are disjoint, so at most one branch fires
    for sequence in (self.scalars, self.sections):
        if key in sequence:
            del self.comments[key]
            del self.inline_comments[key]
            sequence.remove(key)
    if self.main.interpolation and isinstance(val, StringTypes):
        return self._interpolate(key, val)
    return val
+
def popitem(self):
    """Pop and return the first ``(key, value)`` pair, in file order."""
    ordered = self.scalars + self.sections
    if not ordered:
        raise KeyError(": 'popitem(): dictionary is empty'")
    first = ordered[0]
    value = self[first]
    del self[first]
    return first, value
+
def clear(self):
    """
    A version of clear that also affects scalars/sections
    Also clears comments and configspec.

    Leaves other attributes alone :
        depth/main/parent are not affected
    """
    dict.clear(self)
    # reset the parallel bookkeeping structures as well
    self.scalars = []
    self.sections = []
    self.comments = {}
    self.inline_comments = {}
    self.configspec = {}
+
def setdefault(self, key, default=None):
    """``dict.setdefault`` routed through our ``__setitem__`` so the
    scalar/section sequences stay in sync."""
    if key not in self:
        self[key] = default
    return self[key]
+
+ def items(self):
+ """ """
+ return zip((self.scalars + self.sections), self.values())
+
def keys(self):
    """Return scalar names followed by section names, in file order."""
    ordered = list(self.scalars)
    ordered.extend(self.sections)
    return ordered
+
def values(self):
    """Return all values (interpolated where applicable), in key order."""
    result = []
    for key in (self.scalars + self.sections):
        result.append(self[key])
    return result
+
def iteritems(self):
    """Iterate over ``(key, value)`` pairs in file order."""
    for pair in self.items():
        yield pair
+
def iterkeys(self):
    """Iterate over key names: scalars first, then sections."""
    for key in (self.scalars + self.sections):
        yield key

__iter__ = iterkeys
+
def itervalues(self):
    """Iterate over values in key order."""
    for value in self.values():
        yield value
+
def __repr__(self):
    """Render like a plain dict literal, preserving key order."""
    pairs = []
    for key in (self.scalars + self.sections):
        pairs.append('%s: %s' % (repr(key), repr(self[key])))
    return '{%s}' % ', '.join(pairs)

__str__ = __repr__
+
+ # Extra methods - not in a normal dictionary
+
def dict(self):
    """
    Return a deepcopy of self as a dictionary.

    Nested ``Section`` members become plain dicts (recursively, via
    their own ``dict`` method); list and tuple values are shallow-copied
    so the result shares no mutable containers with the section.

    >>> n = a.dict()
    >>> n == a
    1
    >>> n is a
    0
    """
    out = {}
    for key in self:
        value = self[key]
        if isinstance(value, Section):
            # recurse into subsections
            value = value.dict()
        elif isinstance(value, list):
            # copy, don't alias
            value = list(value)
        elif isinstance(value, tuple):
            value = tuple(value)
        out[key] = value
    return out
+
def merge(self, indict):
    """
    A recursive update - useful for merging config files.

    >>> a = '''[section1]
    ... option1 = True
    ... [[subsection]]
    ... more_options = False
    ... # end of file'''.splitlines()
    >>> b = '''# File is user.ini
    ... [section1]
    ... option1 = False
    ... # end of file'''.splitlines()
    >>> c1 = ConfigObj(b)
    >>> c2 = ConfigObj(a)
    >>> c2.merge(c1)
    >>> c2
    {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
    """
    for key, val in indict.items():
        both_dicts = (isinstance(val, dict) and key in self and
                      isinstance(self[key], dict))
        if both_dicts:
            # both sides are sections: merge recursively
            self[key].merge(val)
        else:
            # plain value (or new key): overwrite
            self[key] = val
+
def rename(self, oldkey, newkey):
    """
    Change a keyname to another, without changing position in sequence.

    Implemented so that transformations can be made on keys,
    as well as on values. (used by encode and decode)

    Also renames comments.
    """
    if oldkey in self.scalars:
        sequence = self.scalars
    elif oldkey in self.sections:
        sequence = self.sections
    else:
        raise KeyError('Key "%s" not found.' % oldkey)
    pos = sequence.index(oldkey)
    #
    val = self[oldkey]
    dict.__delitem__(self, oldkey)
    dict.__setitem__(self, newkey, val)
    # replace in place so the key keeps its position
    sequence[pos] = newkey
    # carry the comments across under the new name
    self.comments[newkey] = self.comments.pop(oldkey)
    self.inline_comments[newkey] = self.inline_comments.pop(oldkey)
+
def walk(self, function, raise_errors=True,
         call_on_sections=False, **keywargs):
    """
    Walk every member and call a function on the keyword and value.

    Return a dictionary of the return values

    If the function raises an exception, raise the error
    unless ``raise_errors=False``, in which case set the return value to
    ``False``.

    Any unrecognised keyword arguments you pass to walk, will be passed on
    to the function you pass in.

    Note: if ``call_on_sections`` is ``True`` then - on encountering a
    subsection, *first* the function is called for the *whole* subsection,
    and then recurses into its members. This means your function must be
    able to handle strings, dictionaries and lists. This allows you
    to change the key of subsections as well as for ordinary members. The
    return value when called on the whole subsection has to be discarded.

    See the encode and decode methods for examples, including functions.

    .. caution::

        You can use ``walk`` to transform the names of members of a section
        but you mustn't add or delete members.

    >>> config = '''[XXXXsection]
    ... XXXXkey = XXXXvalue'''.splitlines()
    >>> cfg = ConfigObj(config)
    >>> cfg
    {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
    >>> def transform(section, key):
    ...     val = section[key]
    ...     newkey = key.replace('XXXX', 'CLIENT1')
    ...     section.rename(key, newkey)
    ...     if isinstance(val, (tuple, list, dict)):
    ...         pass
    ...     else:
    ...         val = val.replace('XXXX', 'CLIENT1')
    ...         section[newkey] = val
    >>> cfg.walk(transform, call_on_sections=True)
    {'CLIENT1section': {'CLIENT1key': None}}
    >>> cfg
    {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
    """
    out = {}
    # scalars first
    for i in range(len(self.scalars)):
        entry = self.scalars[i]
        try:
            val = function(self, entry, **keywargs)
            # bound again in case name has changed
            entry = self.scalars[i]
            out[entry] = val
        except Exception:
            if raise_errors:
                raise
            else:
                # record the failure instead of propagating
                entry = self.scalars[i]
                out[entry] = False
    # then sections
    for i in range(len(self.sections)):
        entry = self.sections[i]
        if call_on_sections:
            try:
                function(self, entry, **keywargs)
            except Exception:
                if raise_errors:
                    raise
                else:
                    entry = self.sections[i]
                    out[entry] = False
            # bound again in case name has changed
            entry = self.sections[i]
        # previous result is discarded
        out[entry] = self[entry].walk(
            function,
            raise_errors=raise_errors,
            call_on_sections=call_on_sections,
            **keywargs)
    return out
+
def decode(self, encoding):
    """
    Decode all strings and values to unicode, using the specified encoding.

    Works with subsections and list values.

    Uses the ``walk`` method.

    Testing ``encode`` and ``decode``.
    >>> m = ConfigObj(a)
    >>> m.decode('ascii')
    >>> def testuni(val):
    ...     for entry in val:
    ...         if not isinstance(entry, unicode):
    ...             print >> sys.stderr, type(entry)
    ...             raise AssertionError, 'decode failed.'
    ...         if isinstance(val[entry], dict):
    ...             testuni(val[entry])
    ...         elif not isinstance(val[entry], unicode):
    ...             raise AssertionError, 'decode failed.'
    >>> testuni(m)
    >>> m.encode('ascii')
    >>> a == m
    1
    """
    warn('use of ``decode`` is deprecated.', DeprecationWarning)
    def decode(section, key, encoding=encoding, warn=True):
        """Decode one member (key and value) in place."""
        # NOTE(review): the unused ``warn=True`` default shadows the
        # module-level ``warn`` inside this helper -- looks vestigial
        val = section[key]
        if isinstance(val, (list, tuple)):
            newval = []
            for entry in val:
                newval.append(entry.decode(encoding))
        elif isinstance(val, dict):
            # subsections are handled by the recursive walk
            newval = val
        else:
            newval = val.decode(encoding)
        newkey = key.decode(encoding)
        section.rename(key, newkey)
        section[newkey] = newval
    # using ``call_on_sections`` allows us to modify section names
    self.walk(decode, call_on_sections=True)
+
def encode(self, encoding):
    """
    Encode all strings and values from unicode,
    using the specified encoding.

    Works with subsections and list values.
    Uses the ``walk`` method.
    """
    warn('use of ``encode`` is deprecated.', DeprecationWarning)
    def encode(section, key, encoding=encoding):
        """Encode one member (key and value) in place."""
        val = section[key]
        if isinstance(val, (list, tuple)):
            newval = []
            for entry in val:
                newval.append(entry.encode(encoding))
        elif isinstance(val, dict):
            # subsections are handled by the recursive walk
            newval = val
        else:
            newval = val.encode(encoding)
        newkey = key.encode(encoding)
        section.rename(key, newkey)
        section[newkey] = newval
    # ``call_on_sections`` lets us rename section keys too
    self.walk(encode, call_on_sections=True)
+
def istrue(self, key):
    """A deprecated version of ``as_bool``."""
    warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
         'instead.', DeprecationWarning)
    return self.as_bool(key)
+
def as_bool(self, key):
    """
    Accepts a key as input. The corresponding value must be a string or
    the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
    retain compatibility with Python 2.2.

    If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
    ``True``.

    If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
    ``False``.

    ``as_bool`` is not case sensitive.

    Any other input will raise a ``ValueError``.

    >>> a = ConfigObj()
    >>> a['a'] = 'fish'
    >>> a.as_bool('a')
    Traceback (most recent call last):
    ValueError: Value "fish" is neither True nor False
    >>> a['b'] = 'True'
    >>> a.as_bool('b')
    1
    >>> a['b'] = 'off'
    >>> a.as_bool('b')
    0
    """
    val = self[key]
    if val == True:
        # deliberately ``==`` (not ``is``) so the int 1 also matches
        return True
    elif val == False:
        return False
    else:
        try:
            if not isinstance(val, StringTypes):
                # non-string, non-bool values can never match; reuse the
                # KeyError path so a single ValueError is raised below
                raise KeyError
            else:
                return self.main._bools[val.lower()]
        except KeyError:
            raise ValueError('Value "%s" is neither True nor False' % val)
+
def as_int(self, key):
    """
    A convenience method which coerces the specified value to an integer.

    If the value is an invalid literal for ``int``, a ``ValueError`` will
    be raised.

    >>> a = ConfigObj()
    >>> a['a'] = 'fish'
    >>> a.as_int('a')
    Traceback (most recent call last):
    ValueError: invalid literal for int(): fish
    >>> a['b'] = '1'
    >>> a.as_int('b')
    1
    >>> a['b'] = '3.2'
    >>> a.as_int('b')
    Traceback (most recent call last):
    ValueError: invalid literal for int(): 3.2
    """
    # int() itself raises the documented ValueError for bad literals
    return int(self[key])
+
def as_float(self, key):
    """
    A convenience method which coerces the specified value to a float.

    If the value is an invalid literal for ``float``, a ``ValueError`` will
    be raised.

    >>> a = ConfigObj()
    >>> a['a'] = 'fish'
    >>> a.as_float('a')
    Traceback (most recent call last):
    ValueError: invalid literal for float(): fish
    >>> a['b'] = '1'
    >>> a.as_float('b')
    1.0
    >>> a['b'] = '3.2'
    >>> a.as_float('b')
    3.2000000000000002
    """
    # float() itself raises the documented ValueError for bad literals
    return float(self[key])
+
+
class ConfigObj(Section):
    """An object to read, create, and write config files."""

    # matches a ``key = value`` line; groups: indentation, keyword, value
    _keyword = re.compile(r'''^ # line start
        (\s*)                   # indentation
        (                       # keyword
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"=].*?)       # no quotes
        )
        \s*=\s*                 # divider
        (.*)                    # value (including list values and comments)
        $   # line end
        ''',
        re.VERBOSE)

    # matches ``[section]`` / ``[[subsection]]`` marker lines
    _sectionmarker = re.compile(r'''^
        (\s*)                     # 1: indentation
        ((?:\[\s*)+)              # 2: section marker open
        (                         # 3: section name open
            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
            (?:[^'"\s].*?)        # at least one non-space unquoted
        )                         # section name close
        ((?:\s*\])+)              # 4: section marker close
        \s*(\#.*)?                # 5: optional comment
        $''',
        re.VERBOSE)

    # this regexp pulls list values out as a single string
    # or single values and comments
    # FIXME: this regex adds a '' to the end of comma terminated lists
    #   workaround in ``_handle_value``
    _valueexp = re.compile(r'''^
        (?:
            (?:
                (
                    (?:
                        (?:
                            (?:".*?")|              # double quotes
                            (?:'.*?')|              # single quotes
                            (?:[^'",\#][^,\#]*?)    # unquoted
                        )
                        \s*,\s*                     # comma
                    )*      # match all list items ending in a comma (if any)
                )
                (
                    (?:".*?")|                      # double quotes
                    (?:'.*?')|                      # single quotes
                    (?:[^'",\#\s][^,]*?)|           # unquoted
                    (?:(?<!,))                      # Empty value
                )?          # last item in a list - or string value
            )|
            (,)             # alternatively a single comma - empty list
        )
        \s*(\#.*)?          # optional comment
        $''',
        re.VERBOSE)

    # use findall to get the members of a list value
    _listvalueexp = re.compile(r'''
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'",\#].*?)     # unquoted
        )
        \s*,\s*                 # comma
        ''',
        re.VERBOSE)

    # this regexp is used for the value
    # when lists are switched off
    _nolistvalue = re.compile(r'''^
        (
            (?:".*?")|          # double quotes
            (?:'.*?')|          # single quotes
            (?:[^'"\#].*?)|     # unquoted
            (?:)                # Empty value
        )
        \s*(\#.*)?              # optional comment
        $''',
        re.VERBOSE)

    # regexes for finding triple quoted values on one line
    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')

    # maps a quote style to its (single-line, multi-line) matchers
    _triple_quote = {
        "'''": (_single_line_single, _multi_line_single),
        '"""': (_single_line_double, _multi_line_double),
    }

    # Used by the ``istrue`` Section method
    _bools = {
        'yes': True, 'no': False,
        'on': True, 'off': False,
        '1': True, '0': False,
        'true': True, 'false': False,
    }
+
def __init__(self, infile=None, options=None, **kwargs):
    """
    Parse or create a config file object.

    ``ConfigObj(infile=None, options=None, **kwargs)``

    *infile* may be a filename, a list/tuple of lines, a dict (or
    ConfigObj), or a file-like object; keyword arguments override the
    *options* dictionary and must be keys of ``OPTION_DEFAULTS``.
    """
    if infile is None:
        infile = []
    if options is None:
        options = {}
    else:
        # work on a copy: don't mutate the caller's options dict
        options = dict(options)
    # keyword arguments take precedence over an options dictionary
    options.update(kwargs)
    # init the superclass
    Section.__init__(self, self, 0, self)
    #
    defaults = OPTION_DEFAULTS.copy()
    for entry in options.keys():
        if entry not in defaults.keys():
            raise TypeError, 'Unrecognised option "%s".' % entry
    # TODO: check the values too.
    #
    # Add any explicit options to the defaults
    defaults.update(options)
    #
    # initialise a few variables
    self.filename = None
    self._errors = []
    self.raise_errors = defaults['raise_errors']
    self.interpolation = defaults['interpolation']
    self.list_values = defaults['list_values']
    self.create_empty = defaults['create_empty']
    self.file_error = defaults['file_error']
    self.stringify = defaults['stringify']
    self.indent_type = defaults['indent_type']
    self.encoding = defaults['encoding']
    self.default_encoding = defaults['default_encoding']
    self.BOM = False
    self.newlines = None
    self.write_empty_values = defaults['write_empty_values']
    self.unrepr = defaults['unrepr']
    #
    self.initial_comment = []
    self.final_comment = []
    #
    self._terminated = False
    #
    if isinstance(infile, StringTypes):
        self.filename = infile
        if os.path.isfile(infile):
            # NOTE(review): relies on refcounting to close the handle
            # promptly; an explicit close would be more portable
            infile = open(infile).read() or []
        elif self.file_error:
            # raise an error if the file doesn't exist
            raise IOError, 'Config file not found: "%s".' % self.filename
        else:
            # file doesn't already exist
            if self.create_empty:
                # this is a good test that the filename specified
                # isn't impossible - like on a non existent device
                h = open(infile, 'w')
                h.write('')
                h.close()
            infile = []
    elif isinstance(infile, (list, tuple)):
        infile = list(infile)
    elif isinstance(infile, dict):
        # initialise self
        # the Section class handles creating subsections
        if isinstance(infile, ConfigObj):
            # get a copy of our ConfigObj
            infile = infile.dict()
        for entry in infile:
            self[entry] = infile[entry]
        del self._errors
        if defaults['configspec'] is not None:
            self._handle_configspec(defaults['configspec'])
        else:
            self.configspec = None
        return
    elif hasattr(infile, 'read'):
        # This supports file like objects
        infile = infile.read() or []
        # needs splitting into lines - but needs doing *after* decoding
        # in case it's not an 8 bit encoding
    else:
        raise TypeError, ('infile must be a filename,'
                          ' file like object, or list of lines.')
    #
    if infile:
        # don't do it for the empty ConfigObj
        infile = self._handle_bom(infile)
        # infile is now *always* a list
        #
        # Set the newlines attribute (first line ending it finds)
        # and strip trailing '\n' or '\r' from lines
        for line in infile:
            if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
                continue
            for end in ('\r\n', '\n', '\r'):
                if line.endswith(end):
                    self.newlines = end
                    break
            break
        # a final element that is *only* a line ending means the file
        # ended with a terminated line
        if infile[-1] and infile[-1] in ('\r', '\n', '\r\n'):
            self._terminated = True
        infile = [line.rstrip('\r\n') for line in infile]
    #
    self._parse(infile)
    # if we had any errors, now is the time to raise them
    if self._errors:
        info = "at line %s." % self._errors[0].line_number
        if len(self._errors) > 1:
            msg = ("Parsing failed with several errors.\nFirst error %s" %
                   info)
            error = ConfigObjError(msg)
        else:
            error = self._errors[0]
        # set the errors attribute; it's a list of tuples:
        # (error_type, message, line_number)
        error.errors = self._errors
        # set the config attribute
        error.config = self
        raise error
    # delete private attributes
    del self._errors
    #
    if defaults['configspec'] is None:
        self.configspec = None
    else:
        self._handle_configspec(defaults['configspec'])
+
def __repr__(self):
    """``ConfigObj({...})`` representation, keys in file order."""
    pairs = []
    for key in (self.scalars + self.sections):
        pairs.append('%s: %s' % (repr(key), repr(self[key])))
    return 'ConfigObj({%s})' % ', '.join(pairs)
+
def _handle_bom(self, infile):
    """
    Handle any BOM, and decode if necessary.

    If an encoding is specified, that *must* be used - but the BOM should
    still be removed (and the BOM attribute set).

    (If the encoding is wrongly specified, then a BOM for an alternative
    encoding won't be discovered or removed.)

    If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
    removed. The BOM attribute will be set. UTF16 will be decoded to
    unicode.

    NOTE: This method must not be called with an empty ``infile``.

    Specifying the *wrong* encoding is likely to cause a
    ``UnicodeDecodeError``.

    ``infile`` must always be returned as a list of lines, but may be
    passed in as a single string.
    """
    if ((self.encoding is not None) and
            (self.encoding.lower() not in BOM_LIST)):
        # No need to check for a BOM
        # the encoding specified doesn't have one
        # just decode
        return self._decode(infile, self.encoding)
    #
    # ``line`` is the first line whatever the container type
    if isinstance(infile, (list, tuple)):
        line = infile[0]
    else:
        line = infile
    if self.encoding is not None:
        # encoding explicitly supplied
        # And it could have an associated BOM
        # TODO: if encoding is just UTF16 - we ought to check for both
        # TODO: big endian and little endian versions.
        enc = BOM_LIST[self.encoding.lower()]
        if enc == 'utf_16':
            # For UTF16 we try big endian and little endian
            for BOM, (encoding, final_encoding) in BOMS.items():
                if not final_encoding:
                    # skip UTF8
                    continue
                # BUGFIX: test the first *line*, not ``infile`` itself --
                # ``infile`` may be a list, which has no ``startswith``
                if line.startswith(BOM):
                    ### BOM discovered
                    ##self.BOM = True
                    # Don't need to remove BOM
                    return self._decode(infile, encoding)
            #
            # If we get this far, will *probably* raise a DecodeError
            # As it doesn't appear to start with a BOM
            return self._decode(infile, self.encoding)
        #
        # Must be UTF8
        BOM = BOM_SET[enc]
        if not line.startswith(BOM):
            return self._decode(infile, self.encoding)
        #
        newline = line[len(BOM):]
        #
        # BOM removed
        if isinstance(infile, (list, tuple)):
            infile[0] = newline
        else:
            infile = newline
        self.BOM = True
        return self._decode(infile, self.encoding)
    #
    # No encoding specified - so we need to check for UTF8/UTF16
    for BOM, (encoding, final_encoding) in BOMS.items():
        if not line.startswith(BOM):
            continue
        else:
            # BOM discovered
            self.encoding = final_encoding
            if not final_encoding:
                self.BOM = True
                # UTF8
                # remove BOM
                newline = line[len(BOM):]
                if isinstance(infile, (list, tuple)):
                    infile[0] = newline
                else:
                    infile = newline
                # UTF8 - don't decode
                if isinstance(infile, StringTypes):
                    return infile.splitlines(True)
                else:
                    return infile
            # UTF16 - have to decode
            return self._decode(infile, encoding)
    #
    # No BOM discovered and no encoding specified, just return
    if isinstance(infile, StringTypes):
        # infile read from a file will be a single string
        return infile.splitlines(True)
    else:
        return infile
+
def _a_to_u(self, aString):
    """Decode an ASCII string to unicode when an encoding is in effect."""
    if not self.encoding:
        return aString
    return aString.decode('ascii')
+
def _decode(self, infile, encoding):
    """
    Decode infile to unicode, using the specified encoding.

    If ``infile`` is a single string it is also split into a list of
    lines (with line endings kept).
    """
    if isinstance(infile, StringTypes):
        # can't be unicode
        # NOTE: Could raise a ``UnicodeDecodeError``
        return infile.decode(encoding).splitlines(True)
    for i, line in enumerate(infile):
        if not isinstance(line, unicode):
            # NOTE: The isinstance test here handles mixed lists of unicode/string
            # NOTE: But the decode will break on any non-string values
            # NOTE: Or could raise a ``UnicodeDecodeError``
            infile[i] = line.decode(encoding)
    return infile
+
def _decode_element(self, line):
    """Decode *line* via the default encoding when encodings are in play."""
    # only byte strings need decoding, and only when both an output
    # encoding and a default (input) encoding are configured
    if self.encoding and self.default_encoding and isinstance(line, str):
        return line.decode(self.default_encoding)
    return line
+
def _str(self, value):
    """
    Used by ``stringify`` within validate, to turn non-string values
    into strings.
    """
    if isinstance(value, StringTypes):
        return value
    return str(value)
+
+ def _parse(self, infile):
+ """Actually parse the config file."""
+ temp_list_values = self.list_values
+ if self.unrepr:
+ self.list_values = False
+ comment_list = []
+ done_start = False
+ this_section = self
+ maxline = len(infile) - 1
+ cur_index = -1
+ reset_comment = False
+ while cur_index < maxline:
+ if reset_comment:
+ comment_list = []
+ cur_index += 1
+ line = infile[cur_index]
+ sline = line.strip()
+ # do we have anything on the line ?
+ if not sline or sline.startswith('#') or sline.startswith(';'):
+ reset_comment = False
+ comment_list.append(line)
+ continue
+ if not done_start:
+ # preserve initial comment
+ self.initial_comment = comment_list
+ comment_list = []
+ done_start = True
+ reset_comment = True
+ # first we check if it's a section marker
+ mat = self._sectionmarker.match(line)
+ if mat is not None:
+ # is a section line
+ (indent, sect_open, sect_name, sect_close, comment) = (
+ mat.groups())
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ cur_depth = sect_open.count('[')
+ if cur_depth != sect_close.count(']'):
+ self._handle_error(
+ "Cannot compute the section depth at line %s.",
+ NestingError, infile, cur_index)
+ continue
+ #
+ if cur_depth < this_section.depth:
+ # the new section is dropping back to a previous level
+ try:
+ parent = self._match_depth(
+ this_section,
+ cur_depth).parent
+ except SyntaxError:
+ self._handle_error(
+ "Cannot compute nesting level at line %s.",
+ NestingError, infile, cur_index)
+ continue
+ elif cur_depth == this_section.depth:
+ # the new section is a sibling of the current section
+ parent = this_section.parent
+ elif cur_depth == this_section.depth + 1:
+ # the new section is a child the current section
+ parent = this_section
+ else:
+ self._handle_error(
+ "Section too nested at line %s.",
+ NestingError, infile, cur_index)
+ #
+ sect_name = self._unquote(sect_name)
+ if parent.has_key(sect_name):
+ self._handle_error(
+ 'Duplicate section name at line %s.',
+ DuplicateError, infile, cur_index)
+ continue
+ # create the new section
+ this_section = Section(
+ parent,
+ cur_depth,
+ self,
+ name=sect_name)
+ parent[sect_name] = this_section
+ parent.inline_comments[sect_name] = comment
+ parent.comments[sect_name] = comment_list
+ continue
+ #
+ # it's not a section marker,
+ # so it should be a valid ``key = value`` line
+ mat = self._keyword.match(line)
+ if mat is None:
+ # it neither matched as a keyword
+ # or a section marker
+ self._handle_error(
+ 'Invalid line at line "%s".',
+ ParseError, infile, cur_index)
+ else:
+ # is a keyword value
+ # value will include any inline comment
+ (indent, key, value) = mat.groups()
+ if indent and (self.indent_type is None):
+ self.indent_type = indent
+ # check for a multiline value
+ if value[:3] in ['"""', "'''"]:
+ try:
+ (value, comment, cur_index) = self._multiline(
+ value, infile, cur_index, maxline)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in value at line %s.',
+ ParseError, infile, cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception, e:
+ if type(e) == UnknownType:
+ msg = 'Unknown name or type in value at line %s.'
+ else:
+ msg = 'Parse error in value at line %s.'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ if self.unrepr:
+ comment = ''
+ try:
+ value = unrepr(value)
+ except Exception, e:
+ if isinstance(e, UnknownType):
+ msg = 'Unknown name or type in value at line %s.'
+ else:
+ msg = 'Parse error in value at line %s.'
+ self._handle_error(msg, UnreprError, infile,
+ cur_index)
+ continue
+ else:
+ # extract comment and lists
+ try:
+ (value, comment) = self._handle_value(value)
+ except SyntaxError:
+ self._handle_error(
+ 'Parse error in value at line %s.',
+ ParseError, infile, cur_index)
+ continue
+ #
+ key = self._unquote(key)
+ if this_section.has_key(key):
+ self._handle_error(
+ 'Duplicate keyword name at line %s.',
+ DuplicateError, infile, cur_index)
+ continue
+ # add the key.
+ # we set unrepr because if we have got this far we will never
+ # be creating a new section
+ this_section.__setitem__(key, value, unrepr=True)
+ this_section.inline_comments[key] = comment
+ this_section.comments[key] = comment_list
+ continue
+ #
+ if self.indent_type is None:
+ # no indentation used, set the type accordingly
+ self.indent_type = ''
+ #
+ if self._terminated:
+ comment_list.append('')
+ # preserve the final comment
+ if not self and not self.initial_comment:
+ self.initial_comment = comment_list
+ elif not reset_comment:
+ self.final_comment = comment_list
+ self.list_values = temp_list_values
+
def _match_depth(self, sect, depth):
    """
    Given a section and a depth level, walk back through the sections
    parents to see if the depth level matches a previous section.

    Return a reference to the right section,
    or raise a SyntaxError.
    """
    current = sect
    while depth < current.depth:
        if current is current.parent:
            # we've reached the top level already
            raise SyntaxError
        current = current.parent
    if current.depth != depth:
        # depth lies between two existing nesting levels
        raise SyntaxError
    return current
+
def _handle_error(self, text, ErrorClass, infile, cur_index):
    """
    Handle an error according to the error settings.

    Either raise the error or store it.
    The error will have occurred at ``cur_index``
    """
    offending_line = infile[cur_index]
    line_number = cur_index + 1  # errors are reported 1-based
    error = ErrorClass(text % line_number, line_number, offending_line)
    if self.raise_errors:
        # raise the error - parsing stops here
        raise error
    # store the error; it is re-raised when parsing has finished
    self._errors.append(error)
+
def _unquote(self, value):
    """
    Return an unquoted version of a value.

    Strips one matching pair of surrounding single or double quotes;
    anything else (including the empty string) is returned unchanged.
    """
    if not value:
        # guard: '' would raise IndexError on value[0] below
        return value
    if (value[0] == value[-1]) and (value[0] in ('"', "'")):
        value = value[1:-1]
    return value
+
def _quote(self, value, multiline=True):
    """
    Return a safely quoted version of a value.

    Raise a ConfigObjError if the value cannot be safely quoted.
    If multiline is ``True`` (default) then use triple quotes
    if necessary.

    Don't quote values that don't need it.
    Recursively quote members of a list and return a comma joined list.
    Multiline is ``False`` for lists.
    Obey list syntax for empty and single member lists.

    If ``list_values=False`` then the value is only quoted if it contains
    a ``\n`` (is multiline).

    If ``write_empty_values`` is set, and the value is an empty string, it
    won't be quoted.
    """
    if multiline and self.write_empty_values and value == '':
        # Only if multiline is set, so that it is used for values not
        # keys, and not values that are part of a list
        return ''
    if multiline and isinstance(value, (list, tuple)):
        if not value:
            # empty list is written as a bare comma
            return ','
        elif len(value) == 1:
            # single-member list needs a trailing comma
            return self._quote(value[0], multiline=False) + ','
        return ', '.join([self._quote(val, multiline=False)
                          for val in value])
    if not isinstance(value, StringTypes):
        if self.stringify:
            value = str(value)
        else:
            raise TypeError, 'Value "%s" is not a string.' % value
    squot = "'%s'"
    dquot = '"%s"'
    noquot = "%s"
    # characters that force quoting when leading/trailing
    wspace_plus = ' \r\t\n\v\t\'"'
    # NOTE(review): the names look swapped -- ``tsquot`` holds triple
    # *double* quotes and ``tdquot`` triple *single* quotes -- but the
    # usage below is internally consistent
    tsquot = '"""%s"""'
    tdquot = "'''%s'''"
    if not value:
        return '""'
    if (not self.list_values and '\n' not in value) or not (multiline and
            ((("'" in value) and ('"' in value)) or ('\n' in value))):
        if not self.list_values:
            # we don't quote if ``list_values=False``
            quot = noquot
        # for normal values either single or double quotes will do
        elif '\n' in value:
            # will only happen if multiline is off - e.g. '\n' in key
            raise ConfigObjError, ('Value "%s" cannot be safely quoted.' %
                                   value)
        elif ((value[0] not in wspace_plus) and
                (value[-1] not in wspace_plus) and
                (',' not in value)):
            quot = noquot
        else:
            if ("'" in value) and ('"' in value):
                raise ConfigObjError, (
                    'Value "%s" cannot be safely quoted.' % value)
            elif '"' in value:
                quot = squot
            else:
                quot = dquot
    else:
        # if value has '\n' or "'" *and* '"', it will need triple quotes
        if (value.find('"""') != -1) and (value.find("'''") != -1):
            raise ConfigObjError, (
                'Value "%s" cannot be safely quoted.' % value)
        if value.find('"""') == -1:
            quot = tdquot
        else:
            quot = tsquot
    return quot % value
+
def _handle_value(self, value):
    """
    Given a value string, unquote, remove comment,
    handle lists. (including empty and single member lists)

    Returns ``(value, comment)`` where value is a string or a list of
    strings; raises ``SyntaxError`` for malformed input.
    """
    # do we look for lists in values ?
    if not self.list_values:
        mat = self._nolistvalue.match(value)
        if mat is None:
            raise SyntaxError
        # NOTE: we don't unquote here
        return mat.groups()
    #
    mat = self._valueexp.match(value)
    if mat is None:
        # the value is badly constructed, probably badly quoted,
        # or an invalid list
        raise SyntaxError
    (list_values, single, empty_list, comment) = mat.groups()
    if (list_values == '') and (single is None):
        # change this if you want to accept empty values
        raise SyntaxError
    # NOTE: note there is no error handling from here if the regex
    # is wrong: then incorrect values will slip through
    if empty_list is not None:
        # the single comma - meaning an empty list
        return ([], comment)
    if single is not None:
        # handle empty values
        if list_values and not single:
            # FIXME: the '' is a workaround because our regex now matches
            # '' at the end of a list if it has a trailing comma
            single = None
        else:
            # an empty single value is canonicalised to '""'
            single = single or '""'
            single = self._unquote(single)
    if list_values == '':
        # not a list value
        return (single, comment)
    the_list = self._listvalueexp.findall(list_values)
    the_list = [self._unquote(val) for val in the_list]
    if single is not None:
        # append the trailing (post-comma) member
        the_list += [single]
    return (the_list, comment)
+
+ def _multiline(self, value, infile, cur_index, maxline):
+ """Extract the value, where we are in a multiline situation."""
+ quot = value[:3]
+ newvalue = value[3:]
+ single_line = self._triple_quote[quot][0]
+ multi_line = self._triple_quote[quot][1]
+ mat = single_line.match(value)
+ if mat is not None:
+ retval = list(mat.groups())
+ retval.append(cur_index)
+ return retval
+ elif newvalue.find(quot) != -1:
+ # somehow the triple quote is missing
+ raise SyntaxError
+ #
+ while cur_index < maxline:
+ cur_index += 1
+ newvalue += '\n'
+ line = infile[cur_index]
+ if line.find(quot) == -1:
+ newvalue += line
+ else:
+ # end of multiline, process it
+ break
+ else:
+ # we've got to the end of the config, oops...
+ raise SyntaxError
+ mat = multi_line.match(line)
+ if mat is None:
+ # a badly formed line
+ raise SyntaxError
+ (value, comment) = mat.groups()
+ return (newvalue + value, comment, cur_index)
+
+ def _handle_configspec(self, configspec):
+ """Parse the configspec."""
+ # FIXME: Should we check that the configspec was created with the
+ # correct settings ? (i.e. ``list_values=False``)
+ if not isinstance(configspec, ConfigObj):
+ try:
+ configspec = ConfigObj(
+ configspec,
+ raise_errors=True,
+ file_error=True,
+ list_values=False)
+ except ConfigObjError, e:
+ # FIXME: Should these errors have a reference
+ # to the already parsed ConfigObj ?
+ raise ConfigspecError('Parsing configspec failed: %s' % e)
+ except IOError, e:
+ raise IOError('Reading configspec failed: %s' % e)
+ self._set_configspec_value(configspec, self)
+
+ def _set_configspec_value(self, configspec, section):
+ """Used to recursively set configspec values."""
+ if '__many__' in configspec.sections:
+ section.configspec['__many__'] = configspec['__many__']
+ if len(configspec.sections) > 1:
+ # FIXME: can we supply any useful information here ?
+ raise RepeatSectionError
+ if hasattr(configspec, 'initial_comment'):
+ section._configspec_initial_comment = configspec.initial_comment
+ section._configspec_final_comment = configspec.final_comment
+ section._configspec_encoding = configspec.encoding
+ section._configspec_BOM = configspec.BOM
+ section._configspec_newlines = configspec.newlines
+ section._configspec_indent_type = configspec.indent_type
+ for entry in configspec.scalars:
+ section._configspec_comments[entry] = configspec.comments[entry]
+ section._configspec_inline_comments[entry] = (
+ configspec.inline_comments[entry])
+ section.configspec[entry] = configspec[entry]
+ section._order.append(entry)
+ for entry in configspec.sections:
+ if entry == '__many__':
+ continue
+ section._cs_section_comments[entry] = configspec.comments[entry]
+ section._cs_section_inline_comments[entry] = (
+ configspec.inline_comments[entry])
+ if not section.has_key(entry):
+ section[entry] = {}
+ self._set_configspec_value(configspec[entry], section[entry])
+
+ def _handle_repeat(self, section, configspec):
+ """Dynamically assign configspec for repeated section."""
+ try:
+ section_keys = configspec.sections
+ scalar_keys = configspec.scalars
+ except AttributeError:
+ section_keys = [entry for entry in configspec
+ if isinstance(configspec[entry], dict)]
+ scalar_keys = [entry for entry in configspec
+ if not isinstance(configspec[entry], dict)]
+ if '__many__' in section_keys and len(section_keys) > 1:
+ # FIXME: can we supply any useful information here ?
+ raise RepeatSectionError
+ scalars = {}
+ sections = {}
+ for entry in scalar_keys:
+ val = configspec[entry]
+ scalars[entry] = val
+ for entry in section_keys:
+ val = configspec[entry]
+ if entry == '__many__':
+ scalars[entry] = val
+ continue
+ sections[entry] = val
+ #
+ section.configspec = scalars
+ for entry in sections:
+ if not section.has_key(entry):
+ section[entry] = {}
+ self._handle_repeat(section[entry], sections[entry])
+
+ def _write_line(self, indent_string, entry, this_entry, comment):
+ """Write an individual line, for the write method"""
+        # NOTE: the calls to self._quote here handle non-StringType values.
+ if not self.unrepr:
+ val = self._decode_element(self._quote(this_entry))
+ else:
+ val = repr(this_entry)
+ return '%s%s%s%s%s' % (
+ indent_string,
+ self._decode_element(self._quote(entry, multiline=False)),
+ self._a_to_u(' = '),
+ val,
+ self._decode_element(comment))
+
+ def _write_marker(self, indent_string, depth, entry, comment):
+ """Write a section marker line"""
+ return '%s%s%s%s%s' % (
+ indent_string,
+ self._a_to_u('[' * depth),
+ self._quote(self._decode_element(entry), multiline=False),
+ self._a_to_u(']' * depth),
+ self._decode_element(comment))
+
+ def _handle_comment(self, comment):
+ """Deal with a comment."""
+ if not comment:
+ return ''
+ start = self.indent_type
+ if not comment.startswith('#'):
+ start += self._a_to_u(' # ')
+ return (start + comment)
+
+ # Public methods
+
+ def write(self, outfile=None, section=None):
+ """
+ Write the current ConfigObj as a file
+
+ tekNico: FIXME: use StringIO instead of real files
+
+ >>> filename = a.filename
+ >>> a.filename = 'test.ini'
+ >>> a.write()
+ >>> a.filename = filename
+ >>> a == ConfigObj('test.ini', raise_errors=True)
+ 1
+ """
+ if self.indent_type is None:
+ # this can be true if initialised from a dictionary
+ self.indent_type = DEFAULT_INDENT_TYPE
+ #
+ out = []
+ cs = self._a_to_u('#')
+ csp = self._a_to_u('# ')
+ if section is None:
+ int_val = self.interpolation
+ self.interpolation = False
+ section = self
+ for line in self.initial_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+ #
+ indent_string = self.indent_type * section.depth
+ for entry in (section.scalars + section.sections):
+ if entry in section.defaults:
+ # don't write out default values
+ continue
+ for comment_line in section.comments[entry]:
+ comment_line = self._decode_element(comment_line.lstrip())
+ if comment_line and not comment_line.startswith(cs):
+ comment_line = csp + comment_line
+ out.append(indent_string + comment_line)
+ this_entry = section[entry]
+ comment = self._handle_comment(section.inline_comments[entry])
+ #
+ if isinstance(this_entry, dict):
+ # a section
+ out.append(self._write_marker(
+ indent_string,
+ this_entry.depth,
+ entry,
+ comment))
+ out.extend(self.write(section=this_entry))
+ else:
+ out.append(self._write_line(
+ indent_string,
+ entry,
+ this_entry,
+ comment))
+ #
+ if section is self:
+ for line in self.final_comment:
+ line = self._decode_element(line)
+ stripped_line = line.strip()
+ if stripped_line and not stripped_line.startswith(cs):
+ line = csp + line
+ out.append(line)
+ self.interpolation = int_val
+ #
+ if section is not self:
+ return out
+ #
+ if (self.filename is None) and (outfile is None):
+ # output a list of lines
+ # might need to encode
+ # NOTE: This will *screw* UTF16, each line will start with the BOM
+ if self.encoding:
+ out = [l.encode(self.encoding) for l in out]
+ if (self.BOM and ((self.encoding is None) or
+ (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
+ # Add the UTF8 BOM
+ if not out:
+ out.append('')
+ out[0] = BOM_UTF8 + out[0]
+ return out
+ #
+ # Turn the list to a string, joined with correct newlines
+ output = (self._a_to_u(self.newlines or os.linesep)
+ ).join(out)
+ if self.encoding:
+ output = output.encode(self.encoding)
+ if (self.BOM and ((self.encoding is None) or
+ (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
+ # Add the UTF8 BOM
+ output = BOM_UTF8 + output
+ if outfile is not None:
+ outfile.write(output)
+ else:
+ h = open(self.filename, 'wb')
+ h.write(output)
+ h.close()
+
+ def validate(self, validator, preserve_errors=False, copy=False,
+ section=None):
+ """
+ Test the ConfigObj against a configspec.
+
+ It uses the ``validator`` object from *validate.py*.
+
+ To run ``validate`` on the current ConfigObj, call: ::
+
+ test = config.validate(validator)
+
+ (Normally having previously passed in the configspec when the ConfigObj
+ was created - you can dynamically assign a dictionary of checks to the
+ ``configspec`` attribute of a section though).
+
+ It returns ``True`` if everything passes, or a dictionary of
+ pass/fails (True/False). If every member of a subsection passes, it
+ will just have the value ``True``. (It also returns ``False`` if all
+ members fail).
+
+ In addition, it converts the values from strings to their native
+ types if their checks pass (and ``stringify`` is set).
+
+ If ``preserve_errors`` is ``True`` (``False`` is default) then instead
+        of marking a fail with a ``False``, it will preserve the actual
+        exception object. This can contain info about the reason for failure.
+        For example the ``VdtValueTooSmallError`` indicates that the value
+ supplied was too small. If a value (or section) is missing it will
+ still be marked as ``False``.
+
+ You must have the validate module to use ``preserve_errors=True``.
+
+ You can then use the ``flatten_errors`` function to turn your nested
+ results dictionary into a flattened list of failures - useful for
+ displaying meaningful error messages.
+ """
+ if section is None:
+ if self.configspec is None:
+ raise ValueError, 'No configspec supplied.'
+ if preserve_errors:
+ if VdtMissingValue is None:
+ raise ImportError('Missing validate module.')
+ section = self
+ #
+ spec_section = section.configspec
+ if copy and hasattr(section, '_configspec_initial_comment'):
+ section.initial_comment = section._configspec_initial_comment
+ section.final_comment = section._configspec_final_comment
+ section.encoding = section._configspec_encoding
+ section.BOM = section._configspec_BOM
+ section.newlines = section._configspec_newlines
+ section.indent_type = section._configspec_indent_type
+ if '__many__' in section.configspec:
+ many = spec_section['__many__']
+ # dynamically assign the configspecs
+ # for the sections below
+ for entry in section.sections:
+ self._handle_repeat(section[entry], many)
+ #
+ out = {}
+ ret_true = True
+ ret_false = True
+ order = [k for k in section._order if k in spec_section]
+ order += [k for k in spec_section if k not in order]
+ for entry in order:
+ if entry == '__many__':
+ continue
+ if (not entry in section.scalars) or (entry in section.defaults):
+ # missing entries
+ # or entries from defaults
+ missing = True
+ val = None
+ if copy and not entry in section.scalars:
+ # copy comments
+ section.comments[entry] = (
+ section._configspec_comments.get(entry, []))
+ section.inline_comments[entry] = (
+ section._configspec_inline_comments.get(entry, ''))
+ #
+ else:
+ missing = False
+ val = section[entry]
+ try:
+ check = validator.check(spec_section[entry],
+ val,
+ missing=missing
+ )
+ except validator.baseErrorClass, e:
+ if not preserve_errors or isinstance(e, VdtMissingValue):
+ out[entry] = False
+ else:
+ # preserve the error
+ out[entry] = e
+ ret_false = False
+ ret_true = False
+ else:
+ ret_false = False
+ out[entry] = True
+ if self.stringify or missing:
+ # if we are doing type conversion
+ # or the value is a supplied default
+ if not self.stringify:
+ if isinstance(check, (list, tuple)):
+ # preserve lists
+ check = [self._str(item) for item in check]
+ elif missing and check is None:
+ # convert the None from a default to a ''
+ check = ''
+ else:
+ check = self._str(check)
+ if (check != val) or missing:
+ section[entry] = check
+ if not copy and missing and entry not in section.defaults:
+ section.defaults.append(entry)
+ #
+ # Missing sections will have been created as empty ones when the
+ # configspec was read.
+ for entry in section.sections:
+ # FIXME: this means DEFAULT is not copied in copy mode
+ if section is self and entry == 'DEFAULT':
+ continue
+ if copy:
+ section.comments[entry] = section._cs_section_comments[entry]
+ section.inline_comments[entry] = (
+ section._cs_section_inline_comments[entry])
+ check = self.validate(validator, preserve_errors=preserve_errors,
+ copy=copy, section=section[entry])
+ out[entry] = check
+ if check == False:
+ ret_true = False
+ elif check == True:
+ ret_false = False
+ else:
+ ret_true = False
+ ret_false = False
+ #
+ if ret_true:
+ return True
+ elif ret_false:
+ return False
+ else:
+ return out
+
+class SimpleVal(object):
+ """
+ A simple validator.
+ Can be used to check that all members expected are present.
+
+ To use it, provide a configspec with all your members in (the value given
+ will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
+ method of your ``ConfigObj``. ``validate`` will return ``True`` if all
+ members are present, or a dictionary with True/False meaning
+ present/missing. (Whole missing sections will be replaced with ``False``)
+ """
+
+ def __init__(self):
+ self.baseErrorClass = ConfigObjError
+
+ def check(self, check, member, missing=False):
+ """A dummy check method, always returns the value unchanged."""
+ if missing:
+ raise self.baseErrorClass
+ return member
+
+# Check / processing functions for options
+def flatten_errors(cfg, res, levels=None, results=None):
+ """
+ An example function that will turn a nested dictionary of results
+ (as returned by ``ConfigObj.validate``) into a flat list.
+
+ ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
+ dictionary returned by ``validate``.
+
+ (This is a recursive function, so you shouldn't use the ``levels`` or
+    ``results`` arguments - they are used by the function.)
+
+ Returns a list of keys that failed. Each member of the list is a tuple :
+ ::
+
+ ([list of sections...], key, result)
+
+ If ``validate`` was called with ``preserve_errors=False`` (the default)
+ then ``result`` will always be ``False``.
+
+ *list of sections* is a flattened list of sections that the key was found
+ in.
+
+ If the section was missing then key will be ``None``.
+
+ If the value (or section) was missing then ``result`` will be ``False``.
+
+ If ``validate`` was called with ``preserve_errors=True`` and a value
+ was present, but failed the check, then ``result`` will be the exception
+ object returned. You can use this as a string that describes the failure.
+
+ For example *The value "3" is of the wrong type*.
+
+ >>> import validate
+ >>> vtor = validate.Validator()
+ >>> my_ini = '''
+ ... option1 = True
+ ... [section1]
+ ... option1 = True
+ ... [section2]
+ ... another_option = Probably
+ ... [section3]
+ ... another_option = True
+ ... [[section3b]]
+ ... value = 3
+ ... value2 = a
+ ... value3 = 11
+ ... '''
+ >>> my_cfg = '''
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section1]
+ ... option1 = boolean()
+ ... option2 = boolean()
+ ... option3 = boolean(default=Bad_value)
+ ... [section2]
+ ... another_option = boolean()
+ ... [section3]
+ ... another_option = boolean()
+ ... [[section3b]]
+ ... value = integer
+ ... value2 = integer
+ ... value3 = integer(0, 10)
+ ... [[[section3b-sub]]]
+ ... value = string
+ ... [section4]
+ ... another_option = boolean()
+ ... '''
+ >>> cs = my_cfg.split('\\n')
+ >>> ini = my_ini.split('\\n')
+ >>> cfg = ConfigObj(ini, configspec=cs)
+ >>> res = cfg.validate(vtor, preserve_errors=True)
+ >>> errors = []
+ >>> for entry in flatten_errors(cfg, res):
+ ... section_list, key, error = entry
+ ... section_list.insert(0, '[root]')
+ ... if key is not None:
+ ... section_list.append(key)
+ ... else:
+ ... section_list.append('[missing]')
+ ... section_string = ', '.join(section_list)
+ ... errors.append((section_string, ' = ', error))
+ >>> errors.sort()
+ >>> for entry in errors:
+ ... print entry[0], entry[1], (entry[2] or 0)
+ [root], option2 = 0
+ [root], option3 = the value "Bad_value" is of the wrong type.
+ [root], section1, option2 = 0
+ [root], section1, option3 = the value "Bad_value" is of the wrong type.
+ [root], section2, another_option = the value "Probably" is of the wrong type.
+ [root], section3, section3b, section3b-sub, [missing] = 0
+ [root], section3, section3b, value2 = the value "a" is of the wrong type.
+ [root], section3, section3b, value3 = the value "11" is too big.
+ [root], section4, [missing] = 0
+ """
+ if levels is None:
+ # first time called
+ levels = []
+ results = []
+ if res is True:
+ return results
+ if res is False:
+ results.append((levels[:], None, False))
+ if levels:
+ levels.pop()
+ return results
+ for (key, val) in res.items():
+ if val == True:
+ continue
+ if isinstance(cfg.get(key), dict):
+ # Go down one level
+ levels.append(key)
+ flatten_errors(cfg[key], val, levels, results)
+ continue
+ results.append((levels[:], key, val))
+ #
+ # Go up one level
+ if levels:
+ levels.pop()
+ #
+ return results
+
+"""*A programming language is a medium of expression.* - Paul Graham"""
diff --git a/config/makefiles/autotargets.mk b/config/makefiles/autotargets.mk
new file mode 100644
index 0000000..16e06fb
--- /dev/null
+++ b/config/makefiles/autotargets.mk
@@ -0,0 +1,94 @@
+# -*- makefile -*-
+# vim:set ts=8 sw=8 sts=8 noet:
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+
+ifndef INCLUDED_AUTOTARGETS_MK #{
+
+# Conditional does not wrap the entire file so multiple
+# includes will be able to accumulate dependencies.
+
+###########################################################################
+# AUTO_DEPS - A list of deps/targets derived from other macros.
+###########################################################################
+
+MKDIR ?= mkdir -p
+TOUCH ?= touch
+
+# declare for local use, rules.mk may not have been loaded
+space = $(NULL) $(NULL)
+
+# Deps will be considered intermediate when used as a pre-requisite for
+# wildcard targets. Inhibit their removal, mkdir -p is a standalone op.
+.PRECIOUS: %/.mkdir.done
+
+#########################
+##---] FUNCTIONS [---##
+#########################
+
+# Squeeze can be overzealous, restore root for abspath
+getPathPrefix =$(if $(filter /%,$(1)),/)
+
+# Squeeze '//' from the path, easily created by string functions
+_slashSqueeze =$(foreach val,$(getargv),$(call getPathPrefix,$(val))$(subst $(space),/,$(strip $(subst /,$(space),$(val)))))
+
+# Squeeze extraneous directory slashes from the path
+# o protect embedded spaces within the path
+# o replace //+ sequences with /
+slash_strip = \
+ $(strip \
+ $(subst <--[**]-->,$(space),\
+ $(call _slashSqueeze,\
+ $(subst $(space),<--[**]-->,$(1))\
+ )))
+
+# Extract directory path from a dependency file.
+mkdir_stem =$(foreach val,$(getargv),$(subst /.mkdir.done,$(NULL),$(val)))
+
+## Generate timestamp file for threadsafe directory creation
+mkdir_deps =$(foreach dir,$(getargv),$(call slash_strip,$(dir)/.mkdir.done))
+
+#######################
+##---] TARGETS [---##
+#######################
+
+%/.mkdir.done: # mkdir -p -p => mkdir -p
+ $(subst $(space)-p,$(null),$(MKDIR)) -p '$(dir $@)'
+# Make the timestamp old enough for not being a problem with symbolic links
+# targets depending on it. Use Jan 3, 1980 to accommodate any timezone where
+# 198001010000 would translate to something older than FAT epoch.
+ @$(TOUCH) -t 198001030000 '$@'
+
+# A handful of makefiles are attempting "mkdir dot".
+# tbpl/valgrind builds are using this target
+# https://bugzilla.mozilla.org/show_bug.cgi?id=837754
+.mkdir.done:
+ @echo 'WARNING: $(MKDIR) -dot- requested by $(MAKE) -C $(CURDIR) $(MAKECMDGOALS)'
+ @$(TOUCH) -t 198001030000 '$@'
+
+INCLUDED_AUTOTARGETS_MK = 1
+endif #}
+
+
+## Accumulate deps and cleanup
+ifneq (,$(GENERATED_DIRS))
+ GENERATED_DIRS := $(strip $(sort $(GENERATED_DIRS)))
+ tmpauto :=$(call mkdir_deps,GENERATED_DIRS)
+ GENERATED_DIRS_DEPS +=$(tmpauto)
+ GARBAGE_DIRS +=$(GENERATED_DIRS)
+endif
+
+#################################################################
+# One ring/dep to rule them all:
+# config/rules.mk::all target is available by default
+# Add $(AUTO_DEPS) as an explicit target dependency when needed.
+#################################################################
+
+AUTO_DEPS +=$(GENERATED_DIRS_DEPS)
+AUTO_DEPS := $(strip $(sort $(AUTO_DEPS)))
+
+# Complain loudly if deps have not loaded so getargv != $(NULL)
+$(call requiredfunction,getargv)
diff --git a/config/makefiles/makeutils.mk b/config/makefiles/makeutils.mk
new file mode 100644
index 0000000..d7c5c97
--- /dev/null
+++ b/config/makefiles/makeutils.mk
@@ -0,0 +1,117 @@
+# -*- makefile -*-
+# vim:set ts=8 sw=8 sts=8 noet:
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+## Identify function argument types
+istype =$(if $(value ${1}),list,scalar)
+isval =$(if $(filter-out list,$(call istype,${1})),true)
+isvar =$(if $(filter-out scalar,$(call istype,${1})),true)
+
+# Access up to 9 arguments passed, option needed to emulate $*
+# Inline for function expansion, do not use $(call )
+argv =$(strip
+argv +=$(if $(1), $(1))$(if $(2), $(2))$(if $(3), $(3))$(if $(4), $(4))
+argv +=$(if $(5), $(5))$(if $(6), $(6))$(if $(7), $(7))$(if $(8), $(8))
+argv +=$(if $(9), $(9))
+argv +=$(if $(10), $(error makeutils.mk::argv can only handle 9 arguments))
+argv +=)
+
+###########################################################################
+## Access function args as a simple list, inline within user functions.
+## Usage: $(info ** $(call banner,$(getargv)))
+## $(call banner,scalar)
+## $(call banner,list0 list1 list2)
+## $(call banner,ref) ; ref=foo bar tans
+## getarglist() would be a more accurate name but is longer to type
+getargv = $(if $(call isvar,$(1)),$($(1)),$(argv))
+
+###########################################################################
+# Strip [n] leading options from an argument list. This will allow passing
+# extra args to user functions that will not propagate to sub-$(call )'s
+# Usage: $(call subargv,2)
+subargv =$(wordlist $(1),$(words $(getargv)),$(getargv))
+
+###########################################################################
+# Intent: Display a distinct banner heading in the output stream
+# Usage: $(call banner,BUILDING: foo bar tans)
+# Debug:
+# target-preqs = \
+# $(call banner,target-preqs-BEGIN) \
+# foo bar tans \
+# $(call banner,target-preqs-END) \
+# $(NULL)
+# target: $(target-preqs)
+
+banner = \
+$(info ) \
+$(info ***************************************************************************) \
+$(info ** $(getargv)) \
+$(info ***************************************************************************) \
+$(NULL)
+
+#####################################################################
+# Intent: Determine if a string or pattern is contained in a list
+# Usage: strcmp - $(call is_XinY,clean,$(MAKECMDGOALS))
+#      : pattern - $(call is_XinY,clean%,$(MAKECMDGOALS))
+is_XinY =$(filter $(1),$(call subargv,3,$(getargv)))
+
+#####################################################################
+# Provide an alternate var to support testing
+ifdef MAKEUTILS_UNIT_TEST
+ mcg_goals=TEST_MAKECMDGOALS
+else
+ mcg_goals=MAKECMDGOALS
+endif
+
+# Intent: Conditionals for detecting common/tier target use
+isTargetStem = $(sort \
+ $(foreach var,$(getargv),\
+ $(foreach pat,$(var)% %$(var),\
+ $(call is_XinY,$(pat),${$(mcg_goals)})\
+ )))
+isTargetStemClean = $(call isTargetStem,clean)
+isTargetStemExport = $(call isTargetStem,export)
+isTargetStemLibs = $(call isTargetStem,libs)
+isTargetStemTools = $(call isTargetStem,tools)
+
+##################################################
+# Intent: Validation functions / unit test helpers
+
+errorifneq =$(if $(subst $(strip $(1)),$(NULL),$(strip $(2))),$(error expected [$(1)] but found [$(2)]))
+
+# Intent: verify function declaration exists
+requiredfunction =$(foreach func,$(1) $(2) $(3) $(4) $(5) $(6) $(7) $(8) $(9),$(if $(value $(func)),$(NULL),$(error required function [$(func)] is unavailable)))
+
+
+
+## http://www.gnu.org/software/make/manual/make.html#Call-Function
+## Usage: o = $(call map,origin,o map $(MAKE))
+map = $(foreach val,$(2),$(call $(1),$(val)))
+
+
+## Disable checking for clean targets
+ifeq (,$(filter %clean clean%,$(MAKECMDGOALS))) #{
+
+# Usage: $(call checkIfEmpty,[error|warning] foo NULL bar)
+checkIfEmpty =$(foreach var,$(wordlist 2,100,$(argv)),$(if $(strip $($(var))),$(NOP),$(call $(1),Variable $(var) does not contain a value)))
+
+# Usage: $(call errorIfEmpty,foo NULL bar)
+errorIfEmpty =$(call checkIfEmpty,error $(argv))
+warnIfEmpty =$(call checkIfEmpty,warning $(argv))
+
+endif #}
+
+###########################################################################
+## Common makefile library loader
+###########################################################################
+topORerr =$(if $(topsrcdir),$(topsrcdir),$(error topsrcdir is not defined))
+
+ifdef USE_AUTOTARGETS_MK # mkdir_deps
+ include $(topORerr)/config/makefiles/autotargets.mk
+endif
+
+## copy(src, dst): recursive copy
+copy_dir = (cd $(1)/. && $(TAR) $(TAR_CREATE_FLAGS) - .) | (cd $(2)/. && tar -xf -)
diff --git a/config/printconfigsetting.py b/config/printconfigsetting.py
new file mode 100644
index 0000000..bdd6f2a
--- /dev/null
+++ b/config/printconfigsetting.py
@@ -0,0 +1,25 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import configobj, sys
+
+try:
+ (file, section, key) = sys.argv[1:]
+except ValueError:
+ print "Usage: printconfigsetting.py <file> <section> <setting>"
+ sys.exit(1)
+
+c = configobj.ConfigObj(file)
+
+try:
+ s = c[section]
+except KeyError:
+ print >>sys.stderr, "Section [%s] not found." % section
+ sys.exit(1)
+
+try:
+ print s[key]
+except KeyError:
+ print >>sys.stderr, "Key %s not found." % key
+ sys.exit(1)
diff --git a/config/recurse.mk b/config/recurse.mk
new file mode 100644
index 0000000..8afd3c9
--- /dev/null
+++ b/config/recurse.mk
@@ -0,0 +1,9 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+ifndef INCLUDED_RULES_MK
+include $(topsrcdir)/config/rules.mk
+endif
+
+include $(MOZILLA_DIR)/config/recurse.mk
diff --git a/config/rules.mk b/config/rules.mk
new file mode 100644
index 0000000..0a32811
--- /dev/null
+++ b/config/rules.mk
@@ -0,0 +1,13 @@
+# vim:set ts=8 sw=8 sts=8 noet:
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+
+ifndef topsrcdir
+$(error topsrcdir was not set)
+endif
+
+# Use mozilla-central's copy of rules.mk.
+include $(topsrcdir)/platform/config/rules.mk
diff --git a/configure.in b/configure.in
new file mode 100644
index 0000000..a2acd90
--- /dev/null
+++ b/configure.in
@@ -0,0 +1,38 @@
+dnl -*- Mode: Autoconf; tab-width: 4; indent-tabs-mode: nil; -*-
+dnl vi: set tabstop=4 shiftwidth=4 expandtab:
+dnl This Source Code Form is subject to the terms of the Mozilla Public
+dnl License, v. 2.0. If a copy of the MPL was not distributed with this
+dnl file, You can obtain one at http://mozilla.org/MPL/2.0/.
+dnl
+dnl This is a not-really-autoconf script (which is to say, it's a shell script
+dnl that is piped through m4 first) that executes the mozilla-central python
+dnl configure, first doing a little bit of processing to handle mozconfig and
+dnl the --with-external-source-dir rules.
+dnl ========================================================
+divert(0)dnl
+#!/bin/sh
+SRCDIR=$(dirname $0)
+TOPSRCDIR="$SRCDIR"
+MOZILLA_SRCDIR="${SRCDIR}/platform"
+export OLD_CONFIGURE="${MOZILLA_SRCDIR}"/old-configure
+
+# Ensure the comm-* values are used.
+export MOZ_SOURCE_CHANGESET=$(hg -R "$TOPSRCDIR" parent --template="{node}" 2>/dev/null)
+export MOZ_SOURCE_REPO=$(hg -R "$TOPSRCDIR" showconfig paths.default 2>/dev/null | sed -e "s/^ssh:/https:/")
+
+# If MOZCONFIG isn't set, use the .mozconfig from the current directory. This
+# overrides the lookup in mozilla-central's configure, which looks in the wrong
+# directory for this file.
+if test -z "$MOZCONFIG" -a -f "$SRCDIR"/.mozconfig; then
+ export MOZCONFIG="$SRCDIR"/.mozconfig
+elif test -z "$MOZCONFIG" -a -f "$SRCDIR"/mozconfig; then
+ export MOZCONFIG="$SRCDIR"/mozconfig
+fi
+
+# Execute the mozilla configure script in the current directory, adding the
+# parameter we need to run comm-central. Since the configure script is really
+# just a wrapper around invoking a python variant, execute the underlying python
+# directly. We use a copy of the underlying configure script to get paths
+# correct.
+set -- "$@" --with-external-source-dir="$TOPSRCDIR"
+which python2.7 > /dev/null && exec python2.7 "$TOPSRCDIR/configure.py" "$@" || exec python "$TOPSRCDIR/configure.py" "$@"
diff --git a/configure.py b/configure.py
new file mode 100644
index 0000000..64bf38b
--- /dev/null
+++ b/configure.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import print_function, unicode_literals
+
+import imp
+import os
+import sys
+
+
+base_dir = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(base_dir, 'platform', 'python', 'mozbuild'))
+from mozbuild.configure import ConfigureSandbox
+
+# We can't just import config_status since configure is shadowed by this file!
+f, pathname, desc = imp.find_module('configure',
+ [os.path.join(base_dir, 'platform')])
+config_status = imp.load_module('configure', f, pathname, desc).config_status
+
+def main(argv):
+ config = {}
+ sandbox = ConfigureSandbox(config, os.environ, argv)
+ sandbox.run(os.path.join(os.path.dirname(__file__), 'moz.configure'))
+
+ if sandbox._help:
+ return 0
+
+ return config_status(config)
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/mach b/mach
new file mode 100644
index 0000000..0fbedf4
--- /dev/null
+++ b/mach
@@ -0,0 +1,4 @@
+#!/bin/sh
+
+MACH_CMD=./platform/mach
+$MACH_CMD $@
diff --git a/moz.build b/moz.build
new file mode 100644
index 0000000..6001f7e
--- /dev/null
+++ b/moz.build
@@ -0,0 +1,7 @@
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This file needs to stay here even if empty so that mach will work,
+# specifically commands like mach file-info.
diff --git a/moz.configure b/moz.configure
new file mode 100644
index 0000000..5e0ae03
--- /dev/null
+++ b/moz.configure
@@ -0,0 +1,7 @@
+# -*- Mode: python; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+include('platform/moz.configure')