From 0237c250178f269dad3b4a176608e6781421513689429a30df7da51571d7e293 Mon Sep 17 00:00:00 2001 From: Christian Boltz Date: Tue, 8 May 2012 20:39:34 +0000 Subject: [PATCH] Accepting request 116784 from home:cboltz - add apparmor-techdoc.patch to remove traces of the build time in PDF files - update to AppArmor 2.8 beta5 (= 2.7.103 / r2031) - new utility aa-exec to confine a program with the specified AppArmor profile - add support for mount rules - see http://wiki.apparmor.net/index.php/ReleaseNotes_2_8 for full upstream changelog - removed upstreamed and backported patches - remove outdated autobuild and "disable repo" patches that were disabled since the AppArmor 2.7 package - create the Immunix::SubDomain compat perl module only for openSUSE <= 12.1 (bnc#720617 #c7) OBS-URL: https://build.opensuse.org/request/show/116784 OBS-URL: https://build.opensuse.org/package/show/security:apparmor/apparmor?expand=0&rev=10 --- 0001-fix-for-lp929531.patch | 19 - apparmor-2.5.1-rpmlint-asprintf | 82 - apparmor-2.5.1-unified-build | 13059 ---------------- apparmor-2.7.103.tar.gz | 3 + apparmor-2.7.2.tar.gz | 3 - ...r-r2022-log-parser-network-bnc755923.patch | 308 - apparmor-remove-repo | 56 - apparmor-techdoc.patch | 80 + apparmor-utils-subdomain-compat | 2 +- apparmor.changes | 19 + apparmor.spec | 46 +- 11 files changed, 126 insertions(+), 13551 deletions(-) delete mode 100644 0001-fix-for-lp929531.patch delete mode 100644 apparmor-2.5.1-rpmlint-asprintf delete mode 100644 apparmor-2.5.1-unified-build create mode 100644 apparmor-2.7.103.tar.gz delete mode 100644 apparmor-2.7.2.tar.gz delete mode 100644 apparmor-r2022-log-parser-network-bnc755923.patch delete mode 100644 apparmor-remove-repo create mode 100644 apparmor-techdoc.patch diff --git a/0001-fix-for-lp929531.patch b/0001-fix-for-lp929531.patch deleted file mode 100644 index 0dc98b3..0000000 --- a/0001-fix-for-lp929531.patch +++ /dev/null @@ -1,19 +0,0 @@ -Author: Jamie Strandboge -Description: glibc's __get_nprocs() now checks /sys/devices/system/cpu/online - in addition to /proc/stat for the number of processors. This is used in the - _SC_NPROCESSORS_ONLN implementation, a part of sysconf. This was introduced in - upstream glibc commit: - http://repo.or.cz/w/glibc.git/patch/84e2a551a72c79b020694bb327e33b6d71b09b63 -Bug-Ubuntu: https://launchpad.net/bugs/929531 -Index: apparmor-2.7.0/profiles/apparmor.d/abstractions/base -=================================================================== ---- apparmor-2.7.0.orig/profiles/apparmor.d/abstractions/base 2012-02-09 07:57:35.000000000 -0600 -+++ apparmor-2.7.0/profiles/apparmor.d/abstractions/base 2012-02-09 08:01:13.000000000 -0600 -@@ -86,6 +86,7 @@ - @{PROC}/meminfo r, - @{PROC}/stat r, - @{PROC}/cpuinfo r, -+ /sys/devices/system/cpu/online r, - - # glibc's *printf protections read the maps file - @{PROC}/*/maps r, diff --git a/apparmor-2.5.1-rpmlint-asprintf b/apparmor-2.5.1-rpmlint-asprintf deleted file mode 100644 index 4d75dc3..0000000 --- a/apparmor-2.5.1-rpmlint-asprintf +++ /dev/null @@ -1,82 +0,0 @@ -From: Jeff Mahoney -Subject: apparmor: Use _GNU_SOURCE when asprintf is used - - There are a few places in the parser that use asprintf but don't actually - get the prototype from stdio.h. _GNU_SOURCE is needed for that. - - It works as-is but rpmlint in the openSUSE Build Service complains about it. 
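A minimal sketch of the pattern this patch applies (illustrative only, not taken
from the parser sources): asprintf() is a GNU extension, so stdio.h only declares
it when _GNU_SOURCE is defined before the include. Without the define the calls
still link against glibc, but the compiler emits the implicit-declaration
warnings that rpmlint flags.

     #define _GNU_SOURCE   /* must precede stdio.h so asprintf() is declared */
     #include <stdio.h>
     #include <stdlib.h>

     int main(void)
     {
             char *msg = NULL;

             /* with the prototype visible, this compiles warning-free */
             if (asprintf(&msg, "hello %s", "world") < 0)
                     return 1;
             puts(msg);
             free(msg);
             return 0;
     }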
- -Signed-off-by: Jeff Mahoney ---- - parser/Makefile.am | 1 + - parser/parser_include.c | 2 ++ - parser/parser_interface.c | 1 + - parser/parser_lex.l | 4 ++++ - parser/parser_main.c | 1 + - parser/parser_variable.c | 1 + - 6 files changed, 10 insertions(+) - ---- a/parser/Makefile.am -+++ b/parser/Makefile.am -@@ -14,6 +14,7 @@ dist_man_MANS = apparmor.d.5 apparmor.7 - BUILT_SOURCES = parser_lex.c parser_yacc.c af_names.h cap_names.h - AM_YFLAGS = -d - AM_CFLAGS = -DLOCALEDIR=\"$(localedir)\" -+AM_LFLAGS = -D_GNU_SOURCE - apparmor_parser_SOURCES = parser_yacc.y parser_lex.l parser_include.c \ - parser_interface.c parser_main.c parser_misc.c \ - parser_merge.c parser_symtab.c parser_regex.c \ ---- a/parser/parser_include.c -+++ b/parser/parser_include.c -@@ -35,6 +35,8 @@ - - */ - -+#define _GNU_SOURCE /* for asprintf in stdio.h */ -+ - #include - #include - #include ---- a/parser/parser_interface.c -+++ b/parser/parser_interface.c -@@ -15,6 +15,7 @@ - * along with this program; if not, contact Novell, Inc. - */ - -+#define _GNU_SOURCE /* for asprintf in stdio.h */ - #include - #include - #include ---- a/parser/parser_lex.l -+++ b/parser/parser_lex.l -@@ -20,6 +20,10 @@ - /* Definitions section */ - /* %option main */ - -+%{ -+#define _GNU_SOURCE /* for asprintf in stdio.h */ -+%} -+ - /* eliminates need to link with libfl */ - %option noyywrap - %option nounput ---- a/parser/parser_main.c -+++ b/parser/parser_main.c -@@ -19,6 +19,7 @@ - * Ltd. - */ - -+#define _GNU_SOURCE /* for asprintf in stdio.h */ - #include - #include - #include ---- a/parser/parser_variable.c -+++ b/parser/parser_variable.c -@@ -15,6 +15,7 @@ - * along with this program; if not, contact Novell, Inc. - */ - -+#define _GNU_SOURCE /* for asprintf in stdio.h */ - #include - #include - #include diff --git a/apparmor-2.5.1-unified-build b/apparmor-2.5.1-unified-build deleted file mode 100644 index 24308f0..0000000 --- a/apparmor-2.5.1-unified-build +++ /dev/null @@ -1,13059 +0,0 @@ -From: Jeff Mahoney -Subject: apparmor: Use autoconf - - The apparmor build system is currently a mismash of hand-coded makefiles - that don't do anything particularly original. This patch unifies the - build system to use a single configure script. - - - Pulls in the relevant m4 macro files - - Adds relevant "needed" autoconf files to package root directory - - Removes the old autoconf files from subdirectories - - Converts hand-written makefiles to Makefile.am - - Adds missing includes/defines as needed - - - I generally don't touch the files that are already autogenerated. It's - expected that autoreconf will be run before the next tarball is - produced. There's no sense in maintaining the autogenerated ones in - the patch. 
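A rough sketch of the workflow the unified build is meant to enable (the commands
are standard autotools; the prefix, DESTDIR, and option values shown are
illustrative, not mandated by the patch -- the --with-* switches come from the
configure.in hunk later in this patch):

     autoreconf --force --install    # regenerate configure, Makefile.in, libtool files
     ./configure --prefix=/usr       # optionally --with-perl / --with-python / --with-apache
     make
     make check                      # run the self-tests, if desired
     make install DESTDIR="$RPM_BUILD_ROOT"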
- -Signed-off-by: Jeff Mahoney ---- - - AUTHORS | 1 - ChangeLog | 1 - INSTALL | 365 + - Makefile.am | 2 - NEWS | 1 - changehat/Makefile.am | 1 - changehat/mod_apparmor/Makefile.am | 23 - changehat/mod_apparmor/apache2-mod_apparmor.spec.in | 215 - changehat/pam_apparmor/COPYING | 39 - changehat/pam_apparmor/Makefile.am | 9 - changehat/pam_apparmor/pam_apparmor.changes | 49 - changehat/pam_apparmor/pam_apparmor.spec.in | 83 - changehat/tomcat_apparmor/Makefile.am | 1 - changehat/tomcat_apparmor/tomcat_5_0/Makefile.am | 2 - changehat/tomcat_apparmor/tomcat_5_5/Makefile.am | 13 - changehat/tomcat_apparmor/tomcat_5_5/build.xml | 11 - changehat/tomcat_apparmor/tomcat_5_5/src/Makefile.am | 1 - changehat/tomcat_apparmor/tomcat_5_5/src/jni_src/Makefile.am | 17 - configure.in | 203 - deprecated/Makefile.am | 2 - deprecated/management/Makefile.am | 1 - deprecated/management/apparmor-dbus/Makefile.am | 2 - deprecated/management/apparmor-dbus/src/Makefile.am | 3 - deprecated/management/profile-editor/Makefile.am | 2 - deprecated/management/profile-editor/src/Makefile.am | 6 - deprecated/management/profile-editor/src/wxStyledTextCtrl/Makefile.am | 4 - libraries/Makefile.am | 1 - libraries/libapparmor/AUTHORS | 2 - libraries/libapparmor/ChangeLog | 1 - libraries/libapparmor/INSTALL | 236 - libraries/libapparmor/NEWS | 1 - libraries/libapparmor/README | 1 - libraries/libapparmor/autogen.sh | 42 - libraries/libapparmor/compile | 143 - libraries/libapparmor/config.guess | 1502 ---- - libraries/libapparmor/config.sub | 1714 ----- - libraries/libapparmor/doc/Makefile.am | 14 - libraries/libapparmor/install-sh | 520 - - libraries/libapparmor/libapparmor1.spec | 178 - libraries/libapparmor/m4/ac_pod2man.m4 | 16 - libraries/libapparmor/m4/ac_python_devel.m4 | 193 - libraries/libapparmor/src/Makefile.am | 4 - libraries/libapparmor/swig/perl/Makefile.PL.in | 17 - libraries/libapparmor/swig/perl/Makefile.am | 37 - libraries/libapparmor/swig/python/Makefile.am | 2 - libraries/libapparmor/swig/ruby/Makefile.am | 29 - libraries/libapparmor/swig/ruby/extconf.rb | 37 - libraries/libapparmor/testsuite/Makefile.am | 2 - m4/ac_pod2man.m4 | 16 - m4/ac_python_devel.m4 | 209 - m4/am_path_apxs.m4 | 12 - m4/am_path_perl.m4 | 25 - m4/am_path_ruby.m4 | 115 - m4/wxwidgets.m4 | 37 - parser/Makefile.am | 81 - parser/libapparmor_re/Makefile.am | 4 - parser/libapparmor_re/regexp.y | 3082 ---------- - parser/libapparmor_re/regexp.yy | 3082 ++++++++++ - parser/parser_alias.c | 1 - parser/parser_main.c | 3 - parser/parser_policy.c | 1 - parser/parser_regex.c | 2 - parser/parser_symtab.c | 1 - parser/po/Makefile | 8 - po/Makefile.am | 2 - profiles/Makefile | 2 - tests/Makefile.am | 1 - tests/regression/Makefile.am | 1 - tests/regression/subdomain/Makefile.am | 109 - utils/Immunix/Makefile.am | 3 - utils/Makefile.PL | 15 - utils/Makefile.am | 36 - utils/po/Makefile | 8 - 73 files changed, 4463 insertions(+), 8142 deletions(-) - - ---- /dev/null -+++ b/AUTHORS -@@ -0,0 +1 @@ -+ ---- /dev/null -+++ b/ChangeLog -@@ -0,0 +1 @@ -+ ---- /dev/null -+++ b/INSTALL -@@ -0,0 +1,365 @@ -+Installation Instructions -+************************* -+ -+Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005, -+2006, 2007, 2008, 2009 Free Software Foundation, Inc. -+ -+ Copying and distribution of this file, with or without modification, -+are permitted in any medium without royalty provided the copyright -+notice and this notice are preserved. This file is offered as-is, -+without warranty of any kind. 
-+ -+Basic Installation -+================== -+ -+ Briefly, the shell commands `./configure; make; make install' should -+configure, build, and install this package. The following -+more-detailed instructions are generic; see the `README' file for -+instructions specific to this package. Some packages provide this -+`INSTALL' file but do not implement all of the features documented -+below. The lack of an optional feature in a given package is not -+necessarily a bug. More recommendations for GNU packages can be found -+in *note Makefile Conventions: (standards)Makefile Conventions. -+ -+ The `configure' shell script attempts to guess correct values for -+various system-dependent variables used during compilation. It uses -+those values to create a `Makefile' in each directory of the package. -+It may also create one or more `.h' files containing system-dependent -+definitions. Finally, it creates a shell script `config.status' that -+you can run in the future to recreate the current configuration, and a -+file `config.log' containing compiler output (useful mainly for -+debugging `configure'). -+ -+ It can also use an optional file (typically called `config.cache' -+and enabled with `--cache-file=config.cache' or simply `-C') that saves -+the results of its tests to speed up reconfiguring. Caching is -+disabled by default to prevent problems with accidental use of stale -+cache files. -+ -+ If you need to do unusual things to compile the package, please try -+to figure out how `configure' could check whether to do them, and mail -+diffs or instructions to the address given in the `README' so they can -+be considered for the next release. If you are using the cache, and at -+some point `config.cache' contains results you don't want to keep, you -+may remove or edit it. -+ -+ The file `configure.ac' (or `configure.in') is used to create -+`configure' by a program called `autoconf'. You need `configure.ac' if -+you want to change it or regenerate `configure' using a newer version -+of `autoconf'. -+ -+ The simplest way to compile this package is: -+ -+ 1. `cd' to the directory containing the package's source code and type -+ `./configure' to configure the package for your system. -+ -+ Running `configure' might take a while. While running, it prints -+ some messages telling which features it is checking for. -+ -+ 2. Type `make' to compile the package. -+ -+ 3. Optionally, type `make check' to run any self-tests that come with -+ the package, generally using the just-built uninstalled binaries. -+ -+ 4. Type `make install' to install the programs and any data files and -+ documentation. When installing into a prefix owned by root, it is -+ recommended that the package be configured and built as a regular -+ user, and only the `make install' phase executed with root -+ privileges. -+ -+ 5. Optionally, type `make installcheck' to repeat any self-tests, but -+ this time using the binaries in their final installed location. -+ This target does not install anything. Running this target as a -+ regular user, particularly if the prior `make install' required -+ root privileges, verifies that the installation completed -+ correctly. -+ -+ 6. You can remove the program binaries and object files from the -+ source code directory by typing `make clean'. To also remove the -+ files that `configure' created (so you can compile the package for -+ a different kind of computer), type `make distclean'. 
There is -+ also a `make maintainer-clean' target, but that is intended mainly -+ for the package's developers. If you use it, you may have to get -+ all sorts of other programs in order to regenerate files that came -+ with the distribution. -+ -+ 7. Often, you can also type `make uninstall' to remove the installed -+ files again. In practice, not all packages have tested that -+ uninstallation works correctly, even though it is required by the -+ GNU Coding Standards. -+ -+ 8. Some packages, particularly those that use Automake, provide `make -+ distcheck', which can by used by developers to test that all other -+ targets like `make install' and `make uninstall' work correctly. -+ This target is generally not run by end users. -+ -+Compilers and Options -+===================== -+ -+ Some systems require unusual options for compilation or linking that -+the `configure' script does not know about. Run `./configure --help' -+for details on some of the pertinent environment variables. -+ -+ You can give `configure' initial values for configuration parameters -+by setting variables in the command line or in the environment. Here -+is an example: -+ -+ ./configure CC=c99 CFLAGS=-g LIBS=-lposix -+ -+ *Note Defining Variables::, for more details. -+ -+Compiling For Multiple Architectures -+==================================== -+ -+ You can compile the package for more than one kind of computer at the -+same time, by placing the object files for each architecture in their -+own directory. To do this, you can use GNU `make'. `cd' to the -+directory where you want the object files and executables to go and run -+the `configure' script. `configure' automatically checks for the -+source code in the directory that `configure' is in and in `..'. This -+is known as a "VPATH" build. -+ -+ With a non-GNU `make', it is safer to compile the package for one -+architecture at a time in the source code directory. After you have -+installed the package for one architecture, use `make distclean' before -+reconfiguring for another architecture. -+ -+ On MacOS X 10.5 and later systems, you can create libraries and -+executables that work on multiple system types--known as "fat" or -+"universal" binaries--by specifying multiple `-arch' options to the -+compiler but only a single `-arch' option to the preprocessor. Like -+this: -+ -+ ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ -+ CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ -+ CPP="gcc -E" CXXCPP="g++ -E" -+ -+ This is not guaranteed to produce working output in all cases, you -+may have to build one architecture at a time and combine the results -+using the `lipo' tool if you have problems. -+ -+Installation Names -+================== -+ -+ By default, `make install' installs the package's commands under -+`/usr/local/bin', include files under `/usr/local/include', etc. You -+can specify an installation prefix other than `/usr/local' by giving -+`configure' the option `--prefix=PREFIX', where PREFIX must be an -+absolute file name. -+ -+ You can specify separate installation prefixes for -+architecture-specific files and architecture-independent files. If you -+pass the option `--exec-prefix=PREFIX' to `configure', the package uses -+PREFIX as the prefix for installing programs and libraries. -+Documentation and other data files still use the regular prefix. -+ -+ In addition, if you use an unusual directory layout you can give -+options like `--bindir=DIR' to specify different values for particular -+kinds of files. 
Run `configure --help' for a list of the directories -+you can set and what kinds of files go in them. In general, the -+default for these options is expressed in terms of `${prefix}', so that -+specifying just `--prefix' will affect all of the other directory -+specifications that were not explicitly provided. -+ -+ The most portable way to affect installation locations is to pass the -+correct locations to `configure'; however, many packages provide one or -+both of the following shortcuts of passing variable assignments to the -+`make install' command line to change installation locations without -+having to reconfigure or recompile. -+ -+ The first method involves providing an override variable for each -+affected directory. For example, `make install -+prefix=/alternate/directory' will choose an alternate location for all -+directory configuration variables that were expressed in terms of -+`${prefix}'. Any directories that were specified during `configure', -+but not in terms of `${prefix}', must each be overridden at install -+time for the entire installation to be relocated. The approach of -+makefile variable overrides for each directory variable is required by -+the GNU Coding Standards, and ideally causes no recompilation. -+However, some platforms have known limitations with the semantics of -+shared libraries that end up requiring recompilation when using this -+method, particularly noticeable in packages that use GNU Libtool. -+ -+ The second method involves providing the `DESTDIR' variable. For -+example, `make install DESTDIR=/alternate/directory' will prepend -+`/alternate/directory' before all installation names. The approach of -+`DESTDIR' overrides is not required by the GNU Coding Standards, and -+does not work on platforms that have drive letters. On the other hand, -+it does better at avoiding recompilation issues, and works well even -+when some directory options were not specified in terms of `${prefix}' -+at `configure' time. -+ -+Optional Features -+================= -+ -+ If the package supports it, you can cause programs to be installed -+with an extra prefix or suffix on their names by giving `configure' the -+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. -+ -+ Some packages pay attention to `--enable-FEATURE' options to -+`configure', where FEATURE indicates an optional part of the package. -+They may also pay attention to `--with-PACKAGE' options, where PACKAGE -+is something like `gnu-as' or `x' (for the X Window System). The -+`README' should mention any `--enable-' and `--with-' options that the -+package recognizes. -+ -+ For packages that use the X Window System, `configure' can usually -+find the X include and library files automatically, but if it doesn't, -+you can use the `configure' options `--x-includes=DIR' and -+`--x-libraries=DIR' to specify their locations. -+ -+ Some packages offer the ability to configure how verbose the -+execution of `make' will be. For these packages, running `./configure -+--enable-silent-rules' sets the default to minimal output, which can be -+overridden with `make V=1'; while running `./configure -+--disable-silent-rules' sets the default to verbose, which can be -+overridden with `make V=0'. -+ -+Particular systems -+================== -+ -+ On HP-UX, the default C compiler is not ANSI C compatible. 
If GNU -+CC is not installed, it is recommended to use the following options in -+order to use an ANSI C compiler: -+ -+ ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" -+ -+and if that doesn't work, install pre-built binaries of GCC for HP-UX. -+ -+ On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot -+parse its `' header file. The option `-nodtk' can be used as -+a workaround. If GNU CC is not installed, it is therefore recommended -+to try -+ -+ ./configure CC="cc" -+ -+and if that doesn't work, try -+ -+ ./configure CC="cc -nodtk" -+ -+ On Solaris, don't put `/usr/ucb' early in your `PATH'. This -+directory contains several dysfunctional programs; working variants of -+these programs are available in `/usr/bin'. So, if you need `/usr/ucb' -+in your `PATH', put it _after_ `/usr/bin'. -+ -+ On Haiku, software installed for all users goes in `/boot/common', -+not `/usr/local'. It is recommended to use the following options: -+ -+ ./configure --prefix=/boot/common -+ -+Specifying the System Type -+========================== -+ -+ There may be some features `configure' cannot figure out -+automatically, but needs to determine by the type of machine the package -+will run on. Usually, assuming the package is built to be run on the -+_same_ architectures, `configure' can figure that out, but if it prints -+a message saying it cannot guess the machine type, give it the -+`--build=TYPE' option. TYPE can either be a short name for the system -+type, such as `sun4', or a canonical name which has the form: -+ -+ CPU-COMPANY-SYSTEM -+ -+where SYSTEM can have one of these forms: -+ -+ OS -+ KERNEL-OS -+ -+ See the file `config.sub' for the possible values of each field. If -+`config.sub' isn't included in this package, then this package doesn't -+need to know the machine type. -+ -+ If you are _building_ compiler tools for cross-compiling, you should -+use the option `--target=TYPE' to select the type of system they will -+produce code for. -+ -+ If you want to _use_ a cross compiler, that generates code for a -+platform different from the build platform, you should specify the -+"host" platform (i.e., that on which the generated programs will -+eventually be run) with `--host=TYPE'. -+ -+Sharing Defaults -+================ -+ -+ If you want to set default values for `configure' scripts to share, -+you can create a site shell script called `config.site' that gives -+default values for variables like `CC', `cache_file', and `prefix'. -+`configure' looks for `PREFIX/share/config.site' if it exists, then -+`PREFIX/etc/config.site' if it exists. Or, you can set the -+`CONFIG_SITE' environment variable to the location of the site script. -+A warning: not all `configure' scripts look for a site script. -+ -+Defining Variables -+================== -+ -+ Variables not defined in a site shell script can be set in the -+environment passed to `configure'. However, some packages may run -+configure again during the build, and the customized values of these -+variables may be lost. In order to avoid this problem, you should set -+them in the `configure' command line, using `VAR=value'. For example: -+ -+ ./configure CC=/usr/local2/bin/gcc -+ -+causes the specified `gcc' to be used as the C compiler (unless it is -+overridden in the site shell script). -+ -+Unfortunately, this technique does not work for `CONFIG_SHELL' due to -+an Autoconf bug. 
Until the bug is fixed you can use this workaround: -+ -+ CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash -+ -+`configure' Invocation -+====================== -+ -+ `configure' recognizes the following options to control how it -+operates. -+ -+`--help' -+`-h' -+ Print a summary of all of the options to `configure', and exit. -+ -+`--help=short' -+`--help=recursive' -+ Print a summary of the options unique to this package's -+ `configure', and exit. The `short' variant lists options used -+ only in the top level, while the `recursive' variant lists options -+ also present in any nested packages. -+ -+`--version' -+`-V' -+ Print the version of Autoconf used to generate the `configure' -+ script, and exit. -+ -+`--cache-file=FILE' -+ Enable the cache: use and save the results of the tests in FILE, -+ traditionally `config.cache'. FILE defaults to `/dev/null' to -+ disable caching. -+ -+`--config-cache' -+`-C' -+ Alias for `--cache-file=config.cache'. -+ -+`--quiet' -+`--silent' -+`-q' -+ Do not print messages saying which checks are being made. To -+ suppress all normal output, redirect it to `/dev/null' (any error -+ messages will still be shown). -+ -+`--srcdir=DIR' -+ Look for the package's source code in directory DIR. Usually -+ `configure' can determine that directory automatically. -+ -+`--prefix=DIR' -+ Use DIR as the installation prefix. *note Installation Names:: -+ for more details, including other options available for fine-tuning -+ the installation locations. -+ -+`--no-create' -+`-n' -+ Run the configure checks, but stop before creating any output -+ files. -+ -+`configure' also accepts some other, not widely useful, options. Run -+`configure --help' for more details. -+ ---- /dev/null -+++ b/Makefile.am -@@ -0,0 +1,2 @@ -+ACLOCAL_AMFLAGS = -Im4 -+SUBDIRS = libraries parser changehat deprecated profiles tests utils po ---- /dev/null -+++ b/NEWS -@@ -0,0 +1 @@ -+ ---- /dev/null -+++ b/changehat/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = mod_apparmor pam_apparmor tomcat_apparmor ---- /dev/null -+++ b/changehat/mod_apparmor/Makefile.am -@@ -0,0 +1,23 @@ -+if HAVE_APACHE -+INCLUDES = "-I../../libraries/libapparmor/src" -+LIBAPPARMOR="../../libraries/libapparmor/src/libapparmor.la" -+ -+all-local: module -+ -+module: mod_apparmor.c -+ if test "$(srcdir)" != "."; then $(CP) $(srcdir)/mod_apparmor.c . ; fi -+ $(APXS) -c $(INCLUDES) $(LIBAPPARMOR) $< -+ -+install-exec-local: module -+ $(MKDIR_P) $(DESTDIR)$(apache_moduledir) -+ $(APXS) -S LIBEXECDIR=$(DESTDIR)$(apache_moduledir) -i mod_apparmor.la -+ -+man_MANS = mod_apparmor.8 -+ -+PODARGS = --center=AppArmor --release=NOVELL/SUSE -+ -+pod2man = pod2man $(PODARGS) --section $(subst .,,$(suffix $<)) $< > $@ -+ -+.pod.8: -+ $(pod2man) -+endif ---- a/changehat/mod_apparmor/apache2-mod_apparmor.spec.in -+++ /dev/null -@@ -1,215 +0,0 @@ --# ---------------------------------------------------------------------- --# Copyright (c) 2004, 2005 NOVELL (All rights reserved) --# --# This program is free software; you can redistribute it and/or --# modify it under the terms of version 2 of the GNU General Public --# License published by the Free Software Foundation. --# --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. 
--# --# You should have received a copy of the GNU General Public License --# along with this program; if not, contact Novell, Inc. --# ---------------------------------------------------------------------- --# norootforbuild -- --# Check first to see if distro is already defined. --# I hate rpm macros --%if ! %{?distro:1}0 --%if %{?suse_version:1}0 -- %define distro suse --%endif --%if %{?fedora_version:1}0 -- %define distro redhat --%endif --%endif --%if ! %{?distro:1}0 -- %define distro suse --%endif -- --# this is required to be underscore --%define module_name mod_apparmor -- --Summary: AppArmor module for apache2. --Name: apache2-mod_apparmor --Version: @@immunix_version@@ --Release: @@repo_version@@ --Group: Applications/System --Source0: %{name}-%{version}-@@repo_version@@.tar.gz --License: LGPL --BuildRoot: %{?_tmppath:}%{!?_tmppath:/var/tmp}/%{name}-%{version}-build --Url: http://forge.novell.com/modules/xfmod/project/?apparmor --Obsoletes: mod_change_hat mod-change-hat mod-apparmor apache2-mod-apparmor --Provides: mod_change_hat mod-change-hat mod-apparmor apache2-mod-apparmor -- --%if %{distro} == "suse" --%if 0%{?suse_version} < 1010 --BuildRequires: libimmunix --%else --%if 0%{?suse_version} < 1030 --BuildRequires: libapparmor --%else --BuildRequires: libapparmor-devel --%endif --%endif --%else --BuildRequires: libapparmor-devel --%endif -- --%if %{distro} == "suse" --%define apxs /usr/sbin/apxs2 --%define apache_mmn %(MMN=$(%{apxs} -q LIBEXECDIR)_MMN; test -x $MMN && $MMN) --Prereq: apache2-prefork --Prereq: apparmor-parser --BuildRequires: apache2-devel --Requires: apache2 %{apache_mmn} --%else --%if %{distro} == "redhat" || %{distro} == "rhel4" --%define apxs /usr/sbin/apxs --Prereq: httpd --BuildRequires: httpd-devel --%endif --%endif --%define module_path %(%{apxs} -q LIBEXECDIR) --%define apache_sysconfdir %(%{apxs} -q SYSCONFDIR) -- --%description --apache2-mod_apparmor adds support to apache2 to provide AppArmor confinement --to individual cgi scripts handled by apache modules like mod_php and --mod_perl. --This package is part of a suite of tools that used to be named SubDomain. 
-- --%prep -- --%setup -q -- --%build --make APXS=%{apxs} -- --%install --make install DESTDIR=${RPM_BUILD_ROOT} DISTRO=%{distro} MANDIR=%{_mandir} -- --%if %{distro} == "suse" -- mkdir -p ${RPM_BUILD_ROOT}%{_libdir}/apache2-prefork/ -- ln -s %{module_path}/%{module_name}.so ${RPM_BUILD_ROOT}%{_libdir}/apache2-prefork/%{module_name}.so --%else -- %if %{distro} == "redhat" || %{distro} == "rhel4" -- mkdir -p ${RPM_BUILD_ROOT}/%{apache_sysconfdir}.d/ -- install -m 644 %{module_name}.conf ${RPM_BUILD_ROOT}/%{apache_sysconfdir}.d/ -- %endif --%endif -- --%clean --[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT -- --%files --%defattr(-,root,root) --%{module_path} --%if %{distro} == "suse" -- %{_libdir}/apache2-prefork/%{module_name}.so --%else -- %if %{distro} == "redhat" || %{distro} == "rhel4" -- %{apache_sysconfdir}.d/%{module_name}.conf -- %endif --%endif --%doc COPYING.LGPL --%{_mandir}/man*/* --%doc *.[0-9].html --%doc common/apparmor.css -- --%post --%if %{distro} == "suse" -- /usr/sbin/a2enmod apparmor --%endif -- --%preun --%if %{distro} == "suse" -- if [ $1 = 0 ] ; then -- /usr/sbin/a2dismod apparmor -- fi --%endif -- --%triggerpostun -- mod_change_hat mod-change-hat --%if %{distro} == "suse" -- /usr/sbin/a2enmod apparmor --%endif -- --%changelog --* Sun Jul 29 2007 - sbeattie@suse.de --- Convert builddep on libapparmor to libapparmor-devel --* Tue Apr 3 2007 - sbeattie@suse.de --- Add mod_apparmor manpage to package --* Wed Sep 06 2006 - poeml@suse.de --- rename to apache2-mod_apparmor --- use a2enmod instead of frob_sysconfig --- remove SuSEconfig calls --* Fri May 26 2006 - schwab@suse.de --- Don't strip binaries. --* Wed Apr 12 2006 - Steve Beattie --- Move to novell forge svn repo; fix build issue with new layout --* Thu Mar 30 2006 - Seth Arnold 2.0-7.2 --- Relicense to LGPL --* Mon Jan 30 2006 - Steve Beattie 2.0-7.1 --- Renamed apache config options: -- ImmhatName -> AAHatName -- ImmDefaultHatName -> AADefaultHatName --* Mon Jan 30 2006 - poeml@suse.de --- removed libapr-util1-devel from BuildRequires (apache2-devel does -- require it) --* Fri Jan 27 2006 Steve Beattie 2.0-6.1 --- No more neededforbuild in STABLE --* Wed Jan 25 2006 Steve Beattie 2.0-6 --- Fix linking against libapparmor.so --* Sun Jan 8 2006 Steve Beattie 2.0-5 --- More SUSE autobuild fixups. --* Wed Jan 4 2006 Steve Beattie 2.0-4 --- Fixup SUSE autobuild require on apache-devel-packages --- Add svn revision to the source tarball --* Sun Dec 18 2005 Steve Beattie 2.0-3 --- Include symlink in %{_libdir}/apache2-prefork/ --* Thu Dec 8 2005 Steve Beattie 2.0-2 --- Rename to apache2-mod-apparmor for consistency w/SUSE packages --- Rename module to mod_apparmor.so --* Wed Dec 7 2005 Steve Beattie 2.0-1 --- Reset version for inclusion in SUSE autobuild --* Mon Dec 5 2005 Steve Beattie 1.99-9 --- Rename package to mod-apparmor --* Wed Nov 30 2005 Steve Beattie 1.99-8 --- Minor packaging cleanups --* Wed Nov 30 2005 Steve Beattie 1.99-7_imnx --- Convert license to GPL --* Thu Jun 23 2005 Steve Beattie 1.99-6_imnx --- Add trigger for mod_change_hat => mod-change-hat upgrades --- Don't run SuSEconfig on SuSE 9.3 or newer --* Mon May 23 2005 Steve Beattie 1.99-5_imnx --- Fix package uninstall on RHEL4. --* Fri Mar 11 2005 Steve Beattie 1.99-4_imnx --- Rename to be consistent with other packages --* Fri Feb 18 2005 Steve Beattie 1.99-3_imnx --- Cleanup some non-64bit clean code, sigh. --- Fix install locations on 64-bit platform. 
--* Fri Feb 4 2005 Seth Arnold 1.99-1_imnx --- Reversion to 1.99 --* Fri Nov 12 2004 Steve Beattie 1.2-2_imnx --- Add configuration file for redhat build --* Tue Oct 12 2004 Steve Beattie 1.2-1_imnx --- Bump version after shass-1.1 branched off --* Mon Sep 20 2004 Dominic Reynolds 1.0-7_imnx_(redhat|suse) --- Modified to build separate versions for suse/redhat (EL3). --- Note:RH version does not currently setup the module configuraiton --- in apache. --* Tue Aug 31 2004 Steve Beattie 1.0-6_imnx --- Got location and per server config directives working somewhat -- correctly :-) --- copyright fixups. --* Fri Aug 20 2004 Steve Beattie 1.0-5_imnx --- added support for hatname --* Wed Jul 21 2004 Steve Beattie 1.0-4_imnx --- reduced loglevel of some debug messages --- add change_hat to list of apache modules --* Tue Jul 20 2004 Steve Beattie 1.0-2_imnx --- got module actually working, at least in simple cases. --* Thu Jul 15 2004 Steve Beattie 1.0-1_imnx --- Initial package creation. ---- a/changehat/pam_apparmor/COPYING -+++ /dev/null -@@ -1,39 +0,0 @@ --The pam_apparmor package is licensed under the same license as Linux-PAM --, quoted below: -- --------------------------------------------------------------------------- --Redistribution and use in source and binary forms of Linux-PAM, with --or without modification, are permitted provided that the following --conditions are met: -- --1. Redistributions of source code must retain any existing copyright -- notice, and this entire permission notice in its entirety, -- including the disclaimer of warranties. -- --2. Redistributions in binary form must reproduce all prior and current -- copyright notices, this list of conditions, and the following -- disclaimer in the documentation and/or other materials provided -- with the distribution. -- --3. The name of any author may not be used to endorse or promote -- products derived from this software without their specific prior -- written permission. -- --ALTERNATIVELY, this product may be distributed under the terms of the --GNU General Public License, in which case the provisions of the GNU --GPL are required INSTEAD OF the above restrictions. (This clause is --necessary due to a potential conflict between the GNU GPL and the --restrictions contained in a BSD-style copyright.) -- --THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED --WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF --MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. --IN NO EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY DIRECT, INDIRECT, --INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, --BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS --OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND --ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR --TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE --USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH --DAMAGE. 
--------------------------------------------------------------------------- ---- /dev/null -+++ b/changehat/pam_apparmor/Makefile.am -@@ -0,0 +1,9 @@ -+if HAVE_PAM -+securitydir = $(libdir)/security -+security_LTLIBRARIES = pam_apparmor.la -+pam_apparmor_la_SOURCES = pam_apparmor.c get_options.c -+pam_apparmor_la_LDFLAGS = -module -avoid-version -+pam_apparmor_la_LIBADD = ../../libraries/libapparmor/src/libapparmor.la -lpam -+ -+INCLUDES = "-I../../libraries/libapparmor/src" -+endif ---- a/changehat/pam_apparmor/pam_apparmor.changes -+++ /dev/null -@@ -1,49 +0,0 @@ --------------------------------------------------------------------- --Mon Jul 30 08:16:39 CEST 2007 - sbeattie@suse.de -- --- Convert libapparmor builddep to libapparmor-devel -- --------------------------------------------------------------------- --Tue Mar 13 10:27:34 PDT 2007 - jmichael@suse.de -- --- Use pam_modutil_* wrapper functions when possible -- --------------------------------------------------------------------- --Tue Oct 31 12:00:00 UTC 2006 - jmichael@suse.de -- --- Add debug option -- --------------------------------------------------------------------- --Tue Oct 31 12:00:00 UTC 2006 - sbeattie@suse.de -- --- Add configuration options to order attempted hat changes -- --------------------------------------------------------------------- --Wed Oct 25 12:00:00 UTC 2006 - sbeattie@suse.de -- --- remove auto-editing of pam's common-session --- honor RPM's CFLAGS when building --- add license (same as Linux PAM package). -- --------------------------------------------------------------------- --Thu Sep 14 12:00:00 UTC 2006 - jmichael@suse.de -- --- header comment was incorrect --- use pam_get_user() instead of pam_get_item() --- fix read from urandom if 0 -- --------------------------------------------------------------------- --Fri Jan 13 12:00:00 UTC 2006 - sbeattie@suse.de -- --- Add svn repo number to tarball -- --------------------------------------------------------------------- --Fri Jan 13 12:00:00 UTC 2006 - jmichael@suse.de -- --- Make magic tokens harder to guess by pulling them from /dev/urandom -- --------------------------------------------------------------------- --Wed Dec 21 10:31:40 PST 2005 - jmichael@suse.de -- --- initial -- ---- a/changehat/pam_apparmor/pam_apparmor.spec.in -+++ /dev/null -@@ -1,83 +0,0 @@ --# --# spec file for package pam_apparmor (Version 2) --# --# Copyright (c) 2005 SUSE LINUX Products GmbH, Nuernberg, Germany. --# This file and all modifications and additions to the pristine --# package are under the same license as the package itself. 
--# --# Please submit bugfixes or comments via http://www.suse.de/feedback/ --# -- --# norootforbuild -- --Name: pam_apparmor --License: GPL --Group: Productivity/Security --Autoreqprov: on --Version: @@immunix_version@@ --Release: @@repo_version@@ --Summary: Pam module to add AppArmor change_hat functionality --URL: http://forge.novell.com/modules/xfmod/project/?apparmor --Source: pam_apparmor-%{version}-@@repo_version@@.tar.gz --BuildRoot: %{_tmppath}/%{name}-%{version}-build --BuildRequires: pam-devel --Requires: pam --Prereq: pam -- --%if %{?suse_version:1}0 --%if 0%{?suse_version} < 1030 --BuildRequires: libapparmor --%else --BuildRequires: libapparmor-devel --%endif --%else --BuildRequires: libapparmor-devel --%endif -- --%description --The pam_apparmor module provides the means for any pam applications that --call pam_open_session() to automatically perform an AppArmor change_hat --operation in order to switch to a user-specific security policy. -- -- --Authors: ---------- -- Jesse Michael jmichael@suse.de -- --%prep --%setup -q -- --%build --make CFLAGS="${RPM_OPT_FLAGS}" -- --%install --[ "$RPM_BUILD_ROOT" != "/" ] && rm -rf $RPM_BUILD_ROOT --make install DESTDIR=${RPM_BUILD_ROOT} SECDIR=${RPM_BUILD_ROOT}/%{_lib}/security -- --%clean --[ "${RPM_BUILD_ROOT}" != "/" ] && rm -rf ${RPM_BUILD_ROOT} -- --%files --%defattr(444,root,root,755) --%doc README COPYING --%attr(555,root,root) /%{_lib}/security/pam_apparmor.so -- --%changelog -n pam_apparmor --* Tue Oct 31 2006 Jesse Michael --- Add debug option --* Tue Oct 31 2006 Steve Beattie --- Add configuration options to order attempted hat changes --* Wed Oct 25 2006 Steve Beattie --- remove auto-editing of pam's common-session --- honor RPM's CFLAGS when building --- add license (same as Linux PAM package). --* Thu Sep 14 2006 Jesse Michael --- header comment was incorrect --- use pam_get_user() instead of pam_get_item() --- fix read from urandom if 0 --* Fri Jan 13 2006 Steve Beattie --- Add svn repo number to tarball --* Fri Jan 13 2006 Jesse Michael --- Make magic tokens harder to guess by pulling them from /dev/urandom --* Wed Dec 21 2005 - jmichael@suse.de --- initial ---- /dev/null -+++ b/changehat/tomcat_apparmor/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = tomcat_5_0 tomcat_5_5 ---- /dev/null -+++ b/changehat/tomcat_apparmor/tomcat_5_0/Makefile.am -@@ -0,0 +1,2 @@ -+ -+ ---- /dev/null -+++ b/changehat/tomcat_apparmor/tomcat_5_5/Makefile.am -@@ -0,0 +1,13 @@ -+ -+changeHatValve.jar: $(srcdir)/src/com/novell/apparmor/JNIChangeHat.java \ -+ $(srcdir)/src/com/novell/apparmor/catalina/valves/ChangeHatValve.java -+ ant -Dinstall_lib=$(libdir) -Ddist=$(abs_srcdir) \ -+ -Dant.build.javac.source=1.5 jar -+ -+catalinadir = $(datadir)/tomcat6/lib -+ -+catalina_DATA = changeHatValve.jar -+ -+CLEANFILES = $(catalina_DATA) -+ -+SUBDIRS = . 
src ---- a/changehat/tomcat_apparmor/tomcat_5_5/build.xml -+++ b/changehat/tomcat_apparmor/tomcat_5_5/build.xml -@@ -38,9 +38,16 @@ - - - -+ - - -- - -@@ -81,6 +88,8 @@ - - - -+ -+ - - - ---- /dev/null -+++ b/changehat/tomcat_apparmor/tomcat_5_5/src/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = jni_src ---- /dev/null -+++ b/changehat/tomcat_apparmor/tomcat_5_5/src/jni_src/Makefile.am -@@ -0,0 +1,17 @@ -+lib_LTLIBRARIES = libJNIChangeHat.la -+ -+INCLUDES = -I$(top_srcdir)/libraries/libapparmor/src -I$(JAVA_HOME)/include \ -+ -I$(JAVA_HOME)/include/linux -+ -+CLASSPATH = $(builddir)/../../build/ -+CLASSFILE = $(CLASSPATH)/com/novell/apparmor/JNIChangeHat.class -+ -+com_novell_apparmor_JNIChangeHat.h: -+ javah -jni -classpath $(CLASSPATH) com.novell.apparmor.JNIChangeHat -+ -+JNIChangeHat.c : com_novell_apparmor_JNIChangeHat.h -+ -+libJNIChangeHat_la_SOURCES = JNIChangeHat.c -+libJNIChangeHat_la_LIBADD = $(top_builddir)/libraries/libapparmor/src/.libs/libapparmor.la -+ -+CLEANFILES = com_novell_apparmor_JNIChangeHat.h ---- /dev/null -+++ b/configure.in -@@ -0,0 +1,203 @@ -+AC_INIT(apparmor, 2.5.1) -+AC_CONFIG_MACRO_DIR([m4]) -+ -+AM_INIT_AUTOMAKE -+ -+AM_PROG_LEX -+AC_PROG_YACC -+AC_PROG_SED -+AC_HEADER_STDC -+AC_CHECK_HEADERS(unistd.h stdint.h) -+ -+AC_CHECK_FUNCS(asprintf) -+ -+AM_PROG_CC_C_O -+AC_PROG_CXX -+AC_C_CONST -+AM_PROG_LIBTOOL -+AC_PATH_PROG([SWIG], [swig]) -+ -+PROG_POD2MAN -+ -+AC_ARG_WITH(perl, -+[AS_HELP_STRING([--with-perl], -+ [enable perl support for libapparmor [default=detect]])], -+[with_perl=$withval], [with_perl=auto]) -+if test "$with_perl" != "no"; then -+ AM_PATH_PERL(,,[no]) -+ if test "$PERL" = "no"; then -+ if test "$with_perl" = "yes"; then -+ AC_MSG_ERROR([--with-perl was given but the perl interpreter could not be found]) -+ fi -+ else -+ with_perl=yes -+ fi -+fi -+ -+AC_ARG_WITH(python, -+[AS_HELP_STRING([--with-python], -+ [enable python support for libapparmor [default=detect]])], -+[with_python=$withval], [with_python=auto]) -+if test "$with_python" != "no"; then -+ AM_PATH_PYTHON(,,[no]) -+ if test "$PYTHON" = "no"; then -+ if test "$with_python" = "yes"; then -+ AC_MSG_ERROR([--with-python was given but the python interpreter could not be found]) -+ fi -+ else -+ with_python=yes -+ fi -+fi -+ -+AC_ARG_WITH(ruby, -+[AS_HELP_STRING([--with-ruby], -+ [enable ruby support for libapparmor [default=detect]])], -+[with_ruby=$withval], [with_ruby=auto]) -+if test "$with_ruby" != "no"; then -+ AM_PATH_RUBY(,,[no]) -+ -+ CPPFLAGS="$CPPFLAGS $RUBY_INCLUDES" -+ AC_CHECK_HEADER(ruby.h,,RUBY=no,[-]) -+ -+ if test "$RUBY" = "no"; then -+ if test "$with_ruby" = "yes"; then -+ AC_MSG_ERROR([--with-ruby was given but the ruby development environment could not be found]) -+ fi -+ else -+ with_ruby=yes -+ fi -+fi -+ -+AC_ARG_WITH(apache, -+[AS_HELP_STRING([--with-apache], -+ [enable the apache2 changehat module [default=detect]])], -+[with_apache=$withval], [with_apache=auto]) -+if test "$with_apache" != "no"; then -+ AM_PATH_APXS(,,[no]) -+ if test "$APXS" = "no"; then -+ if test "$with_apache" = "yes";then -+ AC_MSG_ERROR([--with-apache was given but the apache build environment could not be found]) -+ fi -+ else -+ with_apache=yes -+ apache_moduledir=`$APXS -q LIBEXECDIR` -+ AC_SUBST(apache_moduledir) -+ fi -+fi -+ -+AC_ARG_WITH(tomcat, -+[AS_HELP_STRING([--with-tomcat], -+ [enable the tomcat changehat module [default=no]])], -+[with_tomcat=$withval], [with_tomcat=no]) -+ -+AC_ARG_WITH(pam, -+[AS_HELP_STRING([--with-pam], -+ [enable the PAM changehat module [default=auto]])], 
-+[with_pam=$withval], [with_pam=auto]) -+if test "$with_pam" != "no"; then -+ AC_CHECK_HEADERS([security/pam_modules.h]) -+ if test "$ac_cv_header_security_pam_modules_h" != "yes"; then -+ if test "$with_pam" = "yes";then -+ AC_MSG_ERROR([--with-pam was giving but the pam build environment could not be found]) -+ fi -+ else -+ with_pam=yes -+ fi -+fi -+ -+AC_ARG_WITH(dbus, -+[AS_HELP_STRING([--with-dbus], -+ [enable dbus support (deprecated) [default=no]])], -+[with_dbus=$withval], [with_dbus=no]) -+ -+if test "$with_dbus" = "yes"; then -+ PKG_CHECK_MODULES(dbus, dbus-1, DBUS=yes, DBUS=no) -+ if test "$DBUS" = "no"; then -+ AC_MSG_ERROR([dbus could not be found]) -+ fi -+fi -+ -+AC_ARG_WITH(profileeditor, -+[AS_HELP_STRING([--with-profileeditor], -+ [enable profileeditor (deprecated) [default=no]])], -+[with_profileeditor=$withval], [with_profileeditor=no]) -+ -+if test "$with_profileeditor" = "yes"; then -+ WXTEST -+fi -+ -+real_sbindir="/sbin" -+AC_SUBST(real_sbindir) -+ -+etc_apparmordir="/etc/apparmor" -+AC_SUBST(etc_apparmordir) -+ -+lib_apparmordir="/lib/apparmor" -+AC_SUBST(lib_apparmordir) -+ -+AM_CONDITIONAL(HAVE_PYTHON, test "$with_python" = "yes") -+AM_CONDITIONAL(HAVE_PERL, test "$with_perl" = "yes") -+AM_CONDITIONAL(HAVE_RUBY, test "$with_ruby" = "yes") -+AM_CONDITIONAL(HAVE_PAM, test "$with_pam" = "yes") -+AM_CONDITIONAL(HAVE_APACHE, test "$with_apache" = "yes") -+AM_CONDITIONAL(HAVE_TOMCAT, test "$with_tomcat" = "yes") -+AM_CONDITIONAL(HAVE_DBUS, test "$with_dbus" = "yes") -+AM_CONDITIONAL(HAVE_PROFILEEDITOR, test "$with_profileeditor" = "yes") -+ -+AC_OUTPUT( -+Makefile -+libraries/Makefile -+libraries/libapparmor/Makefile -+libraries/libapparmor/doc/Makefile -+libraries/libapparmor/src/Makefile -+libraries/libapparmor/swig/Makefile -+libraries/libapparmor/swig/perl/Makefile -+libraries/libapparmor/swig/python/Makefile -+libraries/libapparmor/swig/python/setup.py -+libraries/libapparmor/swig/ruby/Makefile -+libraries/libapparmor/testsuite/Makefile -+libraries/libapparmor/testsuite/config/Makefile -+libraries/libapparmor/testsuite/libaalogparse.test/Makefile -+libraries/libapparmor/testsuite/lib/Makefile -+parser/Makefile -+parser/libapparmor_re/Makefile -+changehat/Makefile -+changehat/mod_apparmor/Makefile -+changehat/pam_apparmor/Makefile -+changehat/tomcat_apparmor/Makefile -+changehat/tomcat_apparmor/tomcat_5_0/Makefile -+changehat/tomcat_apparmor/tomcat_5_5/Makefile -+changehat/tomcat_apparmor/tomcat_5_5/src/Makefile -+changehat/tomcat_apparmor/tomcat_5_5/src/jni_src/Makefile -+deprecated/Makefile -+deprecated/management/Makefile -+deprecated/management/apparmor-dbus/Makefile -+deprecated/management/apparmor-dbus/src/Makefile -+deprecated/management/profile-editor/Makefile -+deprecated/management/profile-editor/doc/Makefile -+deprecated/management/profile-editor/doc/en/Makefile -+deprecated/management/profile-editor/src/Makefile -+deprecated/management/profile-editor/src/wxStyledTextCtrl/Makefile -+tests/Makefile -+tests/regression/Makefile -+tests/regression/subdomain/Makefile -+utils/Makefile -+utils/Immunix/Makefile -+po/Makefile -+) -+ -+AC_MSG_RESULT([Summary:]) -+AC_MSG_RESULT([libapprmor bindings:]) -+AC_MSG_RESULT([ Perl: $with_perl]) -+AC_MSG_RESULT([ Python: $with_python]) -+AC_MSG_RESULT([ Ruby: $with_ruby]) -+AC_MSG_RESULT([changehat support:]) -+AC_MSG_RESULT([ PAM: $with_pam]) -+AC_MSG_RESULT([ Apache: $with_apache]) -+AC_MSG_RESULT([ Tomcat: $with_tomcat]) -+AC_MSG_RESULT([Deprecated management interfaces:]) -+AC_MSG_RESULT([ DBUS: $with_dbus]) 
-+AC_MSG_RESULT([ Profile Editor: $with_profileeditor]) -+ -+ -+ ---- /dev/null -+++ b/deprecated/Makefile.am -@@ -0,0 +1,2 @@ -+ -+SUBDIRS = management ---- /dev/null -+++ b/deprecated/management/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = apparmor-dbus profile-editor ---- a/deprecated/management/apparmor-dbus/Makefile.am -+++ b/deprecated/management/apparmor-dbus/Makefile.am -@@ -2,4 +2,6 @@ - # have all needed files, that a GNU package needs - AUTOMAKE_OPTIONS = foreign 1.4 - -+if HAVE_DBUS - SUBDIRS = src -+endif ---- a/deprecated/management/apparmor-dbus/src/Makefile.am -+++ b/deprecated/management/apparmor-dbus/src/Makefile.am -@@ -1,2 +1,5 @@ - bin_PROGRAMS = apparmor-dbus -+ -+apparmor_dbus_CPPFLAGS = $(dbus_CFLAGS) -+apparmor_dbus_LDADD = $(dbus_LIBS) $(top_builddir)/libraries/libapparmor/src/libapparmor.la - apparmor_dbus_SOURCES = aadbus.c ---- a/deprecated/management/profile-editor/Makefile.am -+++ b/deprecated/management/profile-editor/Makefile.am -@@ -1,2 +1,4 @@ -+if HAVE_PROFILEEDITOR - SUBDIRS = src doc -+endif - ---- a/deprecated/management/profile-editor/src/Makefile.am -+++ b/deprecated/management/profile-editor/src/Makefile.am -@@ -12,13 +12,15 @@ bin_PROGRAMS = profileeditor - # the application source, library search path, and link libraries - profileeditor_SOURCES = ProfileTextCtrl.cpp Preferences.cpp AboutDialog.cpp \ - SearchAllProfiles.cpp Configuration.cpp profileeditor.cpp --profileeditor_LDFLAGS = $(WX_LIBS) -+profileeditor_LDFLAGS = $(WX_LIBS) - - - - SUBDIRS = wxStyledTextCtrl -+profileeditor_CPPFLAGS = $(WX_CPPFLAGS) -+profileeditor_CXXFLAGS = $(WX_CXXFLAGS) - profileeditor_LDADD =\ -- $(top_builddir)/src/wxStyledTextCtrl/libAppArmorStyledTextCtrl.a -+ $(builddir)/wxStyledTextCtrl/libAppArmorStyledTextCtrl.a - - AM_CXXFLAGS = \ - -DHELP_FILE_LOCATION=\"$(datadir)/doc/@PACKAGE@/AppArmorProfileEditor.htb\" ---- a/deprecated/management/profile-editor/src/wxStyledTextCtrl/Makefile.am -+++ b/deprecated/management/profile-editor/src/wxStyledTextCtrl/Makefile.am -@@ -15,5 +15,5 @@ noinst_HEADERS = Accessor.h AutoComplete - UniConversion.h ViewStyle.h WindowAccessor.h XPM.h - - AM_CFLAGS = -DSCI_LEXER -DLINK_LEXERS -fPIC -DPIC -DWX_PRECOMP -DNO_GCC_PRAGMA \ -- -D__WXGTK__ -D__WX__ --AM_CXXFLAGS = -DSCI_LEXER -DLINK_LEXERS -fPIC -DPIC -DWX_PRECOMP -DNO_GCC_PRAGMA -+ -D__WXGTK__ -D__WX__ $(WX_CPPFLAGS) -+AM_CXXFLAGS = -DSCI_LEXER -DLINK_LEXERS -fPIC -DPIC -DWX_PRECOMP -DNO_GCC_PRAGMA $(WX_CXXFLAGS) ---- /dev/null -+++ b/libraries/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = libapparmor ---- a/libraries/libapparmor/AUTHORS -+++ /dev/null -@@ -1,2 +0,0 @@ --Steve Beattie --Matt Barringer ---- a/libraries/libapparmor/ChangeLog -+++ /dev/null -@@ -1 +0,0 @@ -- ---- a/libraries/libapparmor/INSTALL -+++ /dev/null -@@ -1,236 +0,0 @@ --Installation Instructions --************************* -- --Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005 Free --Software Foundation, Inc. -- --This file is free documentation; the Free Software Foundation gives --unlimited permission to copy, distribute and modify it. -- --Basic Installation --================== -- --These are generic installation instructions. -- -- The `configure' shell script attempts to guess correct values for --various system-dependent variables used during compilation. It uses --those values to create a `Makefile' in each directory of the package. --It may also create one or more `.h' files containing system-dependent --definitions. 
Finally, it creates a shell script `config.status' that --you can run in the future to recreate the current configuration, and a --file `config.log' containing compiler output (useful mainly for --debugging `configure'). -- -- It can also use an optional file (typically called `config.cache' --and enabled with `--cache-file=config.cache' or simply `-C') that saves --the results of its tests to speed up reconfiguring. (Caching is --disabled by default to prevent problems with accidental use of stale --cache files.) -- -- If you need to do unusual things to compile the package, please try --to figure out how `configure' could check whether to do them, and mail --diffs or instructions to the address given in the `README' so they can --be considered for the next release. If you are using the cache, and at --some point `config.cache' contains results you don't want to keep, you --may remove or edit it. -- -- The file `configure.ac' (or `configure.in') is used to create --`configure' by a program called `autoconf'. You only need --`configure.ac' if you want to change it or regenerate `configure' using --a newer version of `autoconf'. -- --The simplest way to compile this package is: -- -- 1. `cd' to the directory containing the package's source code and type -- `./configure' to configure the package for your system. If you're -- using `csh' on an old version of System V, you might need to type -- `sh ./configure' instead to prevent `csh' from trying to execute -- `configure' itself. -- -- Running `configure' takes awhile. While running, it prints some -- messages telling which features it is checking for. -- -- 2. Type `make' to compile the package. -- -- 3. Optionally, type `make check' to run any self-tests that come with -- the package. -- -- 4. Type `make install' to install the programs and any data files and -- documentation. -- -- 5. You can remove the program binaries and object files from the -- source code directory by typing `make clean'. To also remove the -- files that `configure' created (so you can compile the package for -- a different kind of computer), type `make distclean'. There is -- also a `make maintainer-clean' target, but that is intended mainly -- for the package's developers. If you use it, you may have to get -- all sorts of other programs in order to regenerate files that came -- with the distribution. -- --Compilers and Options --===================== -- --Some systems require unusual options for compilation or linking that the --`configure' script does not know about. Run `./configure --help' for --details on some of the pertinent environment variables. -- -- You can give `configure' initial values for configuration parameters --by setting variables in the command line or in the environment. Here --is an example: -- -- ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix -- -- *Note Defining Variables::, for more details. -- --Compiling For Multiple Architectures --==================================== -- --You can compile the package for more than one kind of computer at the --same time, by placing the object files for each architecture in their --own directory. To do this, you must use a version of `make' that --supports the `VPATH' variable, such as GNU `make'. `cd' to the --directory where you want the object files and executables to go and run --the `configure' script. `configure' automatically checks for the --source code in the directory that `configure' is in and in `..'. 
-- -- If you have to use a `make' that does not support the `VPATH' --variable, you have to compile the package for one architecture at a --time in the source code directory. After you have installed the --package for one architecture, use `make distclean' before reconfiguring --for another architecture. -- --Installation Names --================== -- --By default, `make install' installs the package's commands under --`/usr/local/bin', include files under `/usr/local/include', etc. You --can specify an installation prefix other than `/usr/local' by giving --`configure' the option `--prefix=PREFIX'. -- -- You can specify separate installation prefixes for --architecture-specific files and architecture-independent files. If you --pass the option `--exec-prefix=PREFIX' to `configure', the package uses --PREFIX as the prefix for installing programs and libraries. --Documentation and other data files still use the regular prefix. -- -- In addition, if you use an unusual directory layout you can give --options like `--bindir=DIR' to specify different values for particular --kinds of files. Run `configure --help' for a list of the directories --you can set and what kinds of files go in them. -- -- If the package supports it, you can cause programs to be installed --with an extra prefix or suffix on their names by giving `configure' the --option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. -- --Optional Features --================= -- --Some packages pay attention to `--enable-FEATURE' options to --`configure', where FEATURE indicates an optional part of the package. --They may also pay attention to `--with-PACKAGE' options, where PACKAGE --is something like `gnu-as' or `x' (for the X Window System). The --`README' should mention any `--enable-' and `--with-' options that the --package recognizes. -- -- For packages that use the X Window System, `configure' can usually --find the X include and library files automatically, but if it doesn't, --you can use the `configure' options `--x-includes=DIR' and --`--x-libraries=DIR' to specify their locations. -- --Specifying the System Type --========================== -- --There may be some features `configure' cannot figure out automatically, --but needs to determine by the type of machine the package will run on. --Usually, assuming the package is built to be run on the _same_ --architectures, `configure' can figure that out, but if it prints a --message saying it cannot guess the machine type, give it the --`--build=TYPE' option. TYPE can either be a short name for the system --type, such as `sun4', or a canonical name which has the form: -- -- CPU-COMPANY-SYSTEM -- --where SYSTEM can have one of these forms: -- -- OS KERNEL-OS -- -- See the file `config.sub' for the possible values of each field. If --`config.sub' isn't included in this package, then this package doesn't --need to know the machine type. -- -- If you are _building_ compiler tools for cross-compiling, you should --use the option `--target=TYPE' to select the type of system they will --produce code for. -- -- If you want to _use_ a cross compiler, that generates code for a --platform different from the build platform, you should specify the --"host" platform (i.e., that on which the generated programs will --eventually be run) with `--host=TYPE'. 
-- --Sharing Defaults --================ -- --If you want to set default values for `configure' scripts to share, you --can create a site shell script called `config.site' that gives default --values for variables like `CC', `cache_file', and `prefix'. --`configure' looks for `PREFIX/share/config.site' if it exists, then --`PREFIX/etc/config.site' if it exists. Or, you can set the --`CONFIG_SITE' environment variable to the location of the site script. --A warning: not all `configure' scripts look for a site script. -- --Defining Variables --================== -- --Variables not defined in a site shell script can be set in the --environment passed to `configure'. However, some packages may run --configure again during the build, and the customized values of these --variables may be lost. In order to avoid this problem, you should set --them in the `configure' command line, using `VAR=value'. For example: -- -- ./configure CC=/usr/local2/bin/gcc -- --causes the specified `gcc' to be used as the C compiler (unless it is --overridden in the site shell script). Here is a another example: -- -- /bin/bash ./configure CONFIG_SHELL=/bin/bash -- --Here the `CONFIG_SHELL=/bin/bash' operand causes subsequent --configuration-related scripts to be executed by `/bin/bash'. -- --`configure' Invocation --====================== -- --`configure' recognizes the following options to control how it operates. -- --`--help' --`-h' -- Print a summary of the options to `configure', and exit. -- --`--version' --`-V' -- Print the version of Autoconf used to generate the `configure' -- script, and exit. -- --`--cache-file=FILE' -- Enable the cache: use and save the results of the tests in FILE, -- traditionally `config.cache'. FILE defaults to `/dev/null' to -- disable caching. -- --`--config-cache' --`-C' -- Alias for `--cache-file=config.cache'. -- --`--quiet' --`--silent' --`-q' -- Do not print messages saying which checks are being made. To -- suppress all normal output, redirect it to `/dev/null' (any error -- messages will still be shown). -- --`--srcdir=DIR' -- Look for the package's source code in directory DIR. Usually -- `configure' can determine that directory automatically. -- --`configure' also accepts some other, not widely useful, options. Run --`configure --help' for more details. -- ---- a/libraries/libapparmor/NEWS -+++ /dev/null -@@ -1 +0,0 @@ --- 2007-06-24 - Initial release, version 0.6 ---- a/libraries/libapparmor/README -+++ /dev/null -@@ -1 +0,0 @@ --What little documentation exists is in src/aalogparse.h. Please file bugs using http://bugzilla.novell.com under the AppArmor product. ---- a/libraries/libapparmor/autogen.sh -+++ /dev/null -@@ -1,42 +0,0 @@ --#!/bin/sh -- --DIE=0 -- --(autoconf --version) < /dev/null > /dev/null 2>&1 || { -- echo -- echo "You must have autoconf installed to compile $package." -- echo "Download the appropriate package for your distribution," -- echo "or get the source tarball at ftp://ftp.gnu.org/pub/gnu/" -- DIE=1 --} -- --(automake --version) < /dev/null > /dev/null 2>&1 || { -- echo -- echo "You must have automake installed to compile $package." -- echo "Download the appropriate package for your system," -- echo "or get the source from one of the GNU ftp sites" -- echo "listed in http://www.gnu.org/order/ftp.html" -- DIE=1 --} -- --(libtool --version) < /dev/null > /dev/null 2>&1 || { -- echo -- echo "You must have libtool installed to compile $package." 
-- echo "Download the appropriate package for your system," -- echo "or get the source from one of the GNU ftp sites" -- echo "listed in http://www.gnu.org/order/ftp.html" -- DIE=1 --} -- --if test "$DIE" -eq 1; then -- exit 1 --fi -- --echo "Running aclocal" --aclocal --echo "Running autoconf" --autoconf --force --echo "Running libtoolize" --libtoolize --automake --echo "Running automake -ac" --automake -ac ---- a/libraries/libapparmor/compile -+++ /dev/null -@@ -1,143 +0,0 @@ --#! /bin/sh --# Wrapper for compilers which do not understand `-c -o'. -- --scriptversion=2009-10-06.20; # UTC -- --# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009 Free Software --# Foundation, Inc. --# Written by Tom Tromey . --# --# This program is free software; you can redistribute it and/or modify --# it under the terms of the GNU General Public License as published by --# the Free Software Foundation; either version 2, or (at your option) --# any later version. --# --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. --# --# You should have received a copy of the GNU General Public License --# along with this program. If not, see . -- --# As a special exception to the GNU General Public License, if you --# distribute this file as part of a program that contains a --# configuration script generated by Autoconf, you may include it under --# the same distribution terms that you use for the rest of that program. -- --# This file is maintained in Automake, please report --# bugs to or send patches to --# . -- --case $1 in -- '') -- echo "$0: No command. Try \`$0 --help' for more information." 1>&2 -- exit 1; -- ;; -- -h | --h*) -- cat <<\EOF --Usage: compile [--help] [--version] PROGRAM [ARGS] -- --Wrapper for compilers which do not understand `-c -o'. --Remove `-o dest.o' from ARGS, run PROGRAM with the remaining --arguments, and rename the output as expected. -- --If you are trying to build a whole package this is not the --right script to run: please start by reading the file `INSTALL'. -- --Report bugs to . --EOF -- exit $? -- ;; -- -v | --v*) -- echo "compile $scriptversion" -- exit $? -- ;; --esac -- --ofile= --cfile= --eat= -- --for arg --do -- if test -n "$eat"; then -- eat= -- else -- case $1 in -- -o) -- # configure might choose to run compile as `compile cc -o foo foo.c'. -- # So we strip `-o arg' only if arg is an object. -- eat=1 -- case $2 in -- *.o | *.obj) -- ofile=$2 -- ;; -- *) -- set x "$@" -o "$2" -- shift -- ;; -- esac -- ;; -- *.c) -- cfile=$1 -- set x "$@" "$1" -- shift -- ;; -- *) -- set x "$@" "$1" -- shift -- ;; -- esac -- fi -- shift --done -- --if test -z "$ofile" || test -z "$cfile"; then -- # If no `-o' option was seen then we might have been invoked from a -- # pattern rule where we don't need one. That is ok -- this is a -- # normal compilation that the losing compiler can handle. If no -- # `.c' file was seen then we are probably linking. That is also -- # ok. -- exec "$@" --fi -- --# Name of file we expect compiler to create. --cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` -- --# Create the lock directory. --# Note: use `[/\\:.-]' here to ensure that we don't use the same name --# that we are using for the .o file. Also, base the name on the expected --# object file name, since that is what matters with a parallel build. 
--lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d --while true; do -- if mkdir "$lockdir" >/dev/null 2>&1; then -- break -- fi -- sleep 1 --done --# FIXME: race condition here if user kills between mkdir and trap. --trap "rmdir '$lockdir'; exit 1" 1 2 15 -- --# Run the compile. --"$@" --ret=$? -- --if test -f "$cofile"; then -- test "$cofile" = "$ofile" || mv "$cofile" "$ofile" --elif test -f "${cofile}bj"; then -- test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" --fi -- --rmdir "$lockdir" --exit $ret -- --# Local Variables: --# mode: shell-script --# sh-indentation: 2 --# eval: (add-hook 'write-file-hooks 'time-stamp) --# time-stamp-start: "scriptversion=" --# time-stamp-format: "%:y-%02m-%02d.%02H" --# time-stamp-time-zone: "UTC" --# time-stamp-end: "; # UTC" --# End: ---- a/libraries/libapparmor/config.guess -+++ /dev/null -@@ -1,1502 +0,0 @@ --#! /bin/sh --# Attempt to guess a canonical system name. --# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, --# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 --# Free Software Foundation, Inc. -- --timestamp='2009-12-30' -- --# This file is free software; you can redistribute it and/or modify it --# under the terms of the GNU General Public License as published by --# the Free Software Foundation; either version 2 of the License, or --# (at your option) any later version. --# --# This program is distributed in the hope that it will be useful, but --# WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU --# General Public License for more details. --# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write to the Free Software --# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA --# 02110-1301, USA. --# --# As a special exception to the GNU General Public License, if you --# distribute this file as part of a program that contains a --# configuration script generated by Autoconf, you may include it under --# the same distribution terms that you use for the rest of that program. -- -- --# Originally written by Per Bothner. Please send patches (context --# diff format) to and include a ChangeLog --# entry. --# --# This script attempts to guess a canonical system name similar to --# config.sub. If it succeeds, it prints the system name on stdout, and --# exits with 0. Otherwise, it exits with 1. --# --# You can get the latest version of this script from: --# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD -- --me=`echo "$0" | sed -e 's,.*/,,'` -- --usage="\ --Usage: $0 [OPTION] -- --Output the configuration name of the system \`$me' is run on. -- --Operation modes: -- -h, --help print this help, then exit -- -t, --time-stamp print date of last modification, then exit -- -v, --version print version number, then exit -- --Report bugs and patches to ." -- --version="\ --GNU config.guess ($timestamp) -- --Originally written by Per Bothner. --Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, --2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free --Software Foundation, Inc. -- --This is free software; see the source for copying conditions. There is NO --warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." -- --help=" --Try \`$me --help' for more information." 
-- --# Parse command line --while test $# -gt 0 ; do -- case $1 in -- --time-stamp | --time* | -t ) -- echo "$timestamp" ; exit ;; -- --version | -v ) -- echo "$version" ; exit ;; -- --help | --h* | -h ) -- echo "$usage"; exit ;; -- -- ) # Stop option processing -- shift; break ;; -- - ) # Use stdin as input. -- break ;; -- -* ) -- echo "$me: invalid option $1$help" >&2 -- exit 1 ;; -- * ) -- break ;; -- esac --done -- --if test $# != 0; then -- echo "$me: too many arguments$help" >&2 -- exit 1 --fi -- --trap 'exit 1' 1 2 15 -- --# CC_FOR_BUILD -- compiler used by this script. Note that the use of a --# compiler to aid in system detection is discouraged as it requires --# temporary files to be created and, as you can see below, it is a --# headache to deal with in a portable fashion. -- --# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still --# use `HOST_CC' if defined, but it is deprecated. -- --# Portable tmp directory creation inspired by the Autoconf team. -- --set_cc_for_build=' --trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; --trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; --: ${TMPDIR=/tmp} ; -- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || -- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || -- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || -- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; --dummy=$tmp/dummy ; --tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; --case $CC_FOR_BUILD,$HOST_CC,$CC in -- ,,) echo "int x;" > $dummy.c ; -- for c in cc gcc c89 c99 ; do -- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then -- CC_FOR_BUILD="$c"; break ; -- fi ; -- done ; -- if test x"$CC_FOR_BUILD" = x ; then -- CC_FOR_BUILD=no_compiler_found ; -- fi -- ;; -- ,,*) CC_FOR_BUILD=$CC ;; -- ,*,*) CC_FOR_BUILD=$HOST_CC ;; --esac ; set_cc_for_build= ;' -- --# This is needed to find uname on a Pyramid OSx when run in the BSD universe. --# (ghazi@noc.rutgers.edu 1994-08-24) --if (test -f /.attbin/uname) >/dev/null 2>&1 ; then -- PATH=$PATH:/.attbin ; export PATH --fi -- --UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown --UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown --UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown --UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown -- --# Note: order is significant - the case branches are not exclusive. -- --case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in -- *:NetBSD:*:*) -- # NetBSD (nbsd) targets should (where applicable) match one or -- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*, -- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently -- # switched to ELF, *-*-netbsd* would select the old -- # object file format. This provides both forward -- # compatibility and a consistent mechanism for selecting the -- # object file format. -- # -- # Note: NetBSD doesn't particularly care about the vendor -- # portion of the name. We always set it to "unknown". 
-- sysctl="sysctl -n hw.machine_arch" -- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ -- /usr/sbin/$sysctl 2>/dev/null || echo unknown)` -- case "${UNAME_MACHINE_ARCH}" in -- armeb) machine=armeb-unknown ;; -- arm*) machine=arm-unknown ;; -- sh3el) machine=shl-unknown ;; -- sh3eb) machine=sh-unknown ;; -- sh5el) machine=sh5le-unknown ;; -- *) machine=${UNAME_MACHINE_ARCH}-unknown ;; -- esac -- # The Operating System including object format, if it has switched -- # to ELF recently, or will in the future. -- case "${UNAME_MACHINE_ARCH}" in -- arm*|i386|m68k|ns32k|sh3*|sparc|vax) -- eval $set_cc_for_build -- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ -- | grep -q __ELF__ -- then -- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). -- # Return netbsd for either. FIX? -- os=netbsd -- else -- os=netbsdelf -- fi -- ;; -- *) -- os=netbsd -- ;; -- esac -- # The OS release -- # Debian GNU/NetBSD machines have a different userland, and -- # thus, need a distinct triplet. However, they do not need -- # kernel version information, so it can be replaced with a -- # suitable tag, in the style of linux-gnu. -- case "${UNAME_VERSION}" in -- Debian*) -- release='-gnu' -- ;; -- *) -- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` -- ;; -- esac -- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: -- # contains redundant information, the shorter form: -- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. -- echo "${machine}-${os}${release}" -- exit ;; -- *:OpenBSD:*:*) -- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` -- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} -- exit ;; -- *:ekkoBSD:*:*) -- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} -- exit ;; -- *:SolidBSD:*:*) -- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} -- exit ;; -- macppc:MirBSD:*:*) -- echo powerpc-unknown-mirbsd${UNAME_RELEASE} -- exit ;; -- *:MirBSD:*:*) -- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} -- exit ;; -- alpha:OSF1:*:*) -- case $UNAME_RELEASE in -- *4.0) -- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` -- ;; -- *5.*) -- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` -- ;; -- esac -- # According to Compaq, /usr/sbin/psrinfo has been available on -- # OSF/1 and Tru64 systems produced since 1995. I hope that -- # covers most systems running today. This code pipes the CPU -- # types through head -n 1, so we only detect the type of CPU 0. -- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` -- case "$ALPHA_CPU_TYPE" in -- "EV4 (21064)") -- UNAME_MACHINE="alpha" ;; -- "EV4.5 (21064)") -- UNAME_MACHINE="alpha" ;; -- "LCA4 (21066/21068)") -- UNAME_MACHINE="alpha" ;; -- "EV5 (21164)") -- UNAME_MACHINE="alphaev5" ;; -- "EV5.6 (21164A)") -- UNAME_MACHINE="alphaev56" ;; -- "EV5.6 (21164PC)") -- UNAME_MACHINE="alphapca56" ;; -- "EV5.7 (21164PC)") -- UNAME_MACHINE="alphapca57" ;; -- "EV6 (21264)") -- UNAME_MACHINE="alphaev6" ;; -- "EV6.7 (21264A)") -- UNAME_MACHINE="alphaev67" ;; -- "EV6.8CB (21264C)") -- UNAME_MACHINE="alphaev68" ;; -- "EV6.8AL (21264B)") -- UNAME_MACHINE="alphaev68" ;; -- "EV6.8CX (21264D)") -- UNAME_MACHINE="alphaev68" ;; -- "EV6.9A (21264/EV69A)") -- UNAME_MACHINE="alphaev69" ;; -- "EV7 (21364)") -- UNAME_MACHINE="alphaev7" ;; -- "EV7.9 (21364A)") -- UNAME_MACHINE="alphaev79" ;; -- esac -- # A Pn.n version is a patched version. -- # A Vn.n version is a released version. -- # A Tn.n version is a released field test version. 
-- # A Xn.n version is an unreleased experimental baselevel. -- # 1.2 uses "1.2" for uname -r. -- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` -- exit ;; -- Alpha\ *:Windows_NT*:*) -- # How do we know it's Interix rather than the generic POSIX subsystem? -- # Should we change UNAME_MACHINE based on the output of uname instead -- # of the specific Alpha model? -- echo alpha-pc-interix -- exit ;; -- 21064:Windows_NT:50:3) -- echo alpha-dec-winnt3.5 -- exit ;; -- Amiga*:UNIX_System_V:4.0:*) -- echo m68k-unknown-sysv4 -- exit ;; -- *:[Aa]miga[Oo][Ss]:*:*) -- echo ${UNAME_MACHINE}-unknown-amigaos -- exit ;; -- *:[Mm]orph[Oo][Ss]:*:*) -- echo ${UNAME_MACHINE}-unknown-morphos -- exit ;; -- *:OS/390:*:*) -- echo i370-ibm-openedition -- exit ;; -- *:z/VM:*:*) -- echo s390-ibm-zvmoe -- exit ;; -- *:OS400:*:*) -- echo powerpc-ibm-os400 -- exit ;; -- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) -- echo arm-acorn-riscix${UNAME_RELEASE} -- exit ;; -- arm:riscos:*:*|arm:RISCOS:*:*) -- echo arm-unknown-riscos -- exit ;; -- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) -- echo hppa1.1-hitachi-hiuxmpp -- exit ;; -- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) -- # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. -- if test "`(/bin/universe) 2>/dev/null`" = att ; then -- echo pyramid-pyramid-sysv3 -- else -- echo pyramid-pyramid-bsd -- fi -- exit ;; -- NILE*:*:*:dcosx) -- echo pyramid-pyramid-svr4 -- exit ;; -- DRS?6000:unix:4.0:6*) -- echo sparc-icl-nx6 -- exit ;; -- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) -- case `/usr/bin/uname -p` in -- sparc) echo sparc-icl-nx7; exit ;; -- esac ;; -- s390x:SunOS:*:*) -- echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- sun4H:SunOS:5.*:*) -- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) -- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) -- echo i386-pc-auroraux${UNAME_RELEASE} -- exit ;; -- i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) -- eval $set_cc_for_build -- SUN_ARCH="i386" -- # If there is a compiler, see if it is configured for 64-bit objects. -- # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. -- # This test works for both compilers. -- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then -- if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ -- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ -- grep IS_64BIT_ARCH >/dev/null -- then -- SUN_ARCH="x86_64" -- fi -- fi -- echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- sun4*:SunOS:6*:*) -- # According to config.sub, this is the proper way to canonicalize -- # SunOS6. Hard to guess exactly what SunOS6 will be like, but -- # it's likely to be more like Solaris than SunOS4. -- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- sun4*:SunOS:*:*) -- case "`/usr/bin/arch -k`" in -- Series*|S4*) -- UNAME_RELEASE=`uname -v` -- ;; -- esac -- # Japanese Language versions have a version number like `4.1.3-JL'. 
-- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` -- exit ;; -- sun3*:SunOS:*:*) -- echo m68k-sun-sunos${UNAME_RELEASE} -- exit ;; -- sun*:*:4.2BSD:*) -- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` -- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 -- case "`/bin/arch`" in -- sun3) -- echo m68k-sun-sunos${UNAME_RELEASE} -- ;; -- sun4) -- echo sparc-sun-sunos${UNAME_RELEASE} -- ;; -- esac -- exit ;; -- aushp:SunOS:*:*) -- echo sparc-auspex-sunos${UNAME_RELEASE} -- exit ;; -- # The situation for MiNT is a little confusing. The machine name -- # can be virtually everything (everything which is not -- # "atarist" or "atariste" at least should have a processor -- # > m68000). The system name ranges from "MiNT" over "FreeMiNT" -- # to the lowercase version "mint" (or "freemint"). Finally -- # the system name "TOS" denotes a system which is actually not -- # MiNT. But MiNT is downward compatible to TOS, so this should -- # be no problem. -- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) -- echo m68k-atari-mint${UNAME_RELEASE} -- exit ;; -- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) -- echo m68k-atari-mint${UNAME_RELEASE} -- exit ;; -- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) -- echo m68k-atari-mint${UNAME_RELEASE} -- exit ;; -- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) -- echo m68k-milan-mint${UNAME_RELEASE} -- exit ;; -- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) -- echo m68k-hades-mint${UNAME_RELEASE} -- exit ;; -- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) -- echo m68k-unknown-mint${UNAME_RELEASE} -- exit ;; -- m68k:machten:*:*) -- echo m68k-apple-machten${UNAME_RELEASE} -- exit ;; -- powerpc:machten:*:*) -- echo powerpc-apple-machten${UNAME_RELEASE} -- exit ;; -- RISC*:Mach:*:*) -- echo mips-dec-mach_bsd4.3 -- exit ;; -- RISC*:ULTRIX:*:*) -- echo mips-dec-ultrix${UNAME_RELEASE} -- exit ;; -- VAX*:ULTRIX*:*:*) -- echo vax-dec-ultrix${UNAME_RELEASE} -- exit ;; -- 2020:CLIX:*:* | 2430:CLIX:*:*) -- echo clipper-intergraph-clix${UNAME_RELEASE} -- exit ;; -- mips:*:*:UMIPS | mips:*:*:RISCos) -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c --#ifdef __cplusplus --#include /* for printf() prototype */ -- int main (int argc, char *argv[]) { --#else -- int main (argc, argv) int argc; char *argv[]; { --#endif -- #if defined (host_mips) && defined (MIPSEB) -- #if defined (SYSTYPE_SYSV) -- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); -- #endif -- #if defined (SYSTYPE_SVR4) -- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); -- #endif -- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) -- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); -- #endif -- #endif -- exit (-1); -- } --EOF -- $CC_FOR_BUILD -o $dummy $dummy.c && -- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && -- SYSTEM_NAME=`$dummy $dummyarg` && -- { echo "$SYSTEM_NAME"; exit; } -- echo mips-mips-riscos${UNAME_RELEASE} -- exit ;; -- Motorola:PowerMAX_OS:*:*) -- echo powerpc-motorola-powermax -- exit ;; -- Motorola:*:4.3:PL8-*) -- echo powerpc-harris-powermax -- exit ;; -- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) -- echo powerpc-harris-powermax -- exit ;; -- Night_Hawk:Power_UNIX:*:*) -- echo powerpc-harris-powerunix -- exit ;; -- m88k:CX/UX:7*:*) -- echo m88k-harris-cxux7 -- exit ;; -- m88k:*:4*:R4*) -- echo m88k-motorola-sysv4 -- exit ;; -- m88k:*:3*:R3*) -- echo m88k-motorola-sysv3 -- exit ;; -- AViiON:dgux:*:*) -- # DG/UX returns AViiON for all 
architectures -- UNAME_PROCESSOR=`/usr/bin/uname -p` -- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] -- then -- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ -- [ ${TARGET_BINARY_INTERFACE}x = x ] -- then -- echo m88k-dg-dgux${UNAME_RELEASE} -- else -- echo m88k-dg-dguxbcs${UNAME_RELEASE} -- fi -- else -- echo i586-dg-dgux${UNAME_RELEASE} -- fi -- exit ;; -- M88*:DolphinOS:*:*) # DolphinOS (SVR3) -- echo m88k-dolphin-sysv3 -- exit ;; -- M88*:*:R3*:*) -- # Delta 88k system running SVR3 -- echo m88k-motorola-sysv3 -- exit ;; -- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) -- echo m88k-tektronix-sysv3 -- exit ;; -- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) -- echo m68k-tektronix-bsd -- exit ;; -- *:IRIX*:*:*) -- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` -- exit ;; -- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. -- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id -- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' -- i*86:AIX:*:*) -- echo i386-ibm-aix -- exit ;; -- ia64:AIX:*:*) -- if [ -x /usr/bin/oslevel ] ; then -- IBM_REV=`/usr/bin/oslevel` -- else -- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} -- fi -- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} -- exit ;; -- *:AIX:2:3) -- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c -- #include -- -- main() -- { -- if (!__power_pc()) -- exit(1); -- puts("powerpc-ibm-aix3.2.5"); -- exit(0); -- } --EOF -- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` -- then -- echo "$SYSTEM_NAME" -- else -- echo rs6000-ibm-aix3.2.5 -- fi -- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then -- echo rs6000-ibm-aix3.2.4 -- else -- echo rs6000-ibm-aix3.2 -- fi -- exit ;; -- *:AIX:*:[456]) -- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` -- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then -- IBM_ARCH=rs6000 -- else -- IBM_ARCH=powerpc -- fi -- if [ -x /usr/bin/oslevel ] ; then -- IBM_REV=`/usr/bin/oslevel` -- else -- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} -- fi -- echo ${IBM_ARCH}-ibm-aix${IBM_REV} -- exit ;; -- *:AIX:*:*) -- echo rs6000-ibm-aix -- exit ;; -- ibmrt:4.4BSD:*|romp-ibm:BSD:*) -- echo romp-ibm-bsd4.4 -- exit ;; -- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and -- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to -- exit ;; # report: romp-ibm BSD 4.3 -- *:BOSX:*:*) -- echo rs6000-bull-bosx -- exit ;; -- DPX/2?00:B.O.S.:*:*) -- echo m68k-bull-sysv3 -- exit ;; -- 9000/[34]??:4.3bsd:1.*:*) -- echo m68k-hp-bsd -- exit ;; -- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) -- echo m68k-hp-bsd4.4 -- exit ;; -- 9000/[34678]??:HP-UX:*:*) -- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` -- case "${UNAME_MACHINE}" in -- 9000/31? ) HP_ARCH=m68000 ;; -- 9000/[34]?? 
) HP_ARCH=m68k ;; -- 9000/[678][0-9][0-9]) -- if [ -x /usr/bin/getconf ]; then -- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` -- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` -- case "${sc_cpu_version}" in -- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 -- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 -- 532) # CPU_PA_RISC2_0 -- case "${sc_kernel_bits}" in -- 32) HP_ARCH="hppa2.0n" ;; -- 64) HP_ARCH="hppa2.0w" ;; -- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 -- esac ;; -- esac -- fi -- if [ "${HP_ARCH}" = "" ]; then -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c -- -- #define _HPUX_SOURCE -- #include -- #include -- -- int main () -- { -- #if defined(_SC_KERNEL_BITS) -- long bits = sysconf(_SC_KERNEL_BITS); -- #endif -- long cpu = sysconf (_SC_CPU_VERSION); -- -- switch (cpu) -- { -- case CPU_PA_RISC1_0: puts ("hppa1.0"); break; -- case CPU_PA_RISC1_1: puts ("hppa1.1"); break; -- case CPU_PA_RISC2_0: -- #if defined(_SC_KERNEL_BITS) -- switch (bits) -- { -- case 64: puts ("hppa2.0w"); break; -- case 32: puts ("hppa2.0n"); break; -- default: puts ("hppa2.0"); break; -- } break; -- #else /* !defined(_SC_KERNEL_BITS) */ -- puts ("hppa2.0"); break; -- #endif -- default: puts ("hppa1.0"); break; -- } -- exit (0); -- } --EOF -- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` -- test -z "$HP_ARCH" && HP_ARCH=hppa -- fi ;; -- esac -- if [ ${HP_ARCH} = "hppa2.0w" ] -- then -- eval $set_cc_for_build -- -- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating -- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler -- # generating 64-bit code. GNU and HP use different nomenclature: -- # -- # $ CC_FOR_BUILD=cc ./config.guess -- # => hppa2.0w-hp-hpux11.23 -- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess -- # => hppa64-hp-hpux11.23 -- -- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | -- grep -q __LP64__ -- then -- HP_ARCH="hppa2.0w" -- else -- HP_ARCH="hppa64" -- fi -- fi -- echo ${HP_ARCH}-hp-hpux${HPUX_REV} -- exit ;; -- ia64:HP-UX:*:*) -- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` -- echo ia64-hp-hpux${HPUX_REV} -- exit ;; -- 3050*:HI-UX:*:*) -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c -- #include -- int -- main () -- { -- long cpu = sysconf (_SC_CPU_VERSION); -- /* The order matters, because CPU_IS_HP_MC68K erroneously returns -- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct -- results, however. 
*/ -- if (CPU_IS_PA_RISC (cpu)) -- { -- switch (cpu) -- { -- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; -- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; -- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; -- default: puts ("hppa-hitachi-hiuxwe2"); break; -- } -- } -- else if (CPU_IS_HP_MC68K (cpu)) -- puts ("m68k-hitachi-hiuxwe2"); -- else puts ("unknown-hitachi-hiuxwe2"); -- exit (0); -- } --EOF -- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && -- { echo "$SYSTEM_NAME"; exit; } -- echo unknown-hitachi-hiuxwe2 -- exit ;; -- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) -- echo hppa1.1-hp-bsd -- exit ;; -- 9000/8??:4.3bsd:*:*) -- echo hppa1.0-hp-bsd -- exit ;; -- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) -- echo hppa1.0-hp-mpeix -- exit ;; -- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) -- echo hppa1.1-hp-osf -- exit ;; -- hp8??:OSF1:*:*) -- echo hppa1.0-hp-osf -- exit ;; -- i*86:OSF1:*:*) -- if [ -x /usr/sbin/sysversion ] ; then -- echo ${UNAME_MACHINE}-unknown-osf1mk -- else -- echo ${UNAME_MACHINE}-unknown-osf1 -- fi -- exit ;; -- parisc*:Lites*:*:*) -- echo hppa1.1-hp-lites -- exit ;; -- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) -- echo c1-convex-bsd -- exit ;; -- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) -- if getsysinfo -f scalar_acc -- then echo c32-convex-bsd -- else echo c2-convex-bsd -- fi -- exit ;; -- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) -- echo c34-convex-bsd -- exit ;; -- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) -- echo c38-convex-bsd -- exit ;; -- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) -- echo c4-convex-bsd -- exit ;; -- CRAY*Y-MP:*:*:*) -- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' -- exit ;; -- CRAY*[A-Z]90:*:*:*) -- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ -- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -- -e 's/\.[^.]*$/.X/' -- exit ;; -- CRAY*TS:*:*:*) -- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' -- exit ;; -- CRAY*T3E:*:*:*) -- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' -- exit ;; -- CRAY*SV1:*:*:*) -- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' -- exit ;; -- *:UNICOS/mp:*:*) -- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' -- exit ;; -- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) -- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` -- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` -- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` -- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" -- exit ;; -- 5000:UNIX_System_V:4.*:*) -- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` -- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` -- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" -- exit ;; -- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) -- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} -- exit ;; -- sparc*:BSD/OS:*:*) -- echo sparc-unknown-bsdi${UNAME_RELEASE} -- exit ;; -- *:BSD/OS:*:*) -- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} -- exit ;; -- *:FreeBSD:*:*) -- case ${UNAME_MACHINE} in -- pc98) -- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; -- amd64) -- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; -- *) -- echo 
${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; -- esac -- exit ;; -- i*:CYGWIN*:*) -- echo ${UNAME_MACHINE}-pc-cygwin -- exit ;; -- *:MINGW*:*) -- echo ${UNAME_MACHINE}-pc-mingw32 -- exit ;; -- i*:windows32*:*) -- # uname -m includes "-pc" on this system. -- echo ${UNAME_MACHINE}-mingw32 -- exit ;; -- i*:PW*:*) -- echo ${UNAME_MACHINE}-pc-pw32 -- exit ;; -- *:Interix*:*) -- case ${UNAME_MACHINE} in -- x86) -- echo i586-pc-interix${UNAME_RELEASE} -- exit ;; -- authenticamd | genuineintel | EM64T) -- echo x86_64-unknown-interix${UNAME_RELEASE} -- exit ;; -- IA64) -- echo ia64-unknown-interix${UNAME_RELEASE} -- exit ;; -- esac ;; -- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) -- echo i${UNAME_MACHINE}-pc-mks -- exit ;; -- 8664:Windows_NT:*) -- echo x86_64-pc-mks -- exit ;; -- i*:Windows_NT*:* | Pentium*:Windows_NT*:*) -- # How do we know it's Interix rather than the generic POSIX subsystem? -- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we -- # UNAME_MACHINE based on the output of uname instead of i386? -- echo i586-pc-interix -- exit ;; -- i*:UWIN*:*) -- echo ${UNAME_MACHINE}-pc-uwin -- exit ;; -- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) -- echo x86_64-unknown-cygwin -- exit ;; -- p*:CYGWIN*:*) -- echo powerpcle-unknown-cygwin -- exit ;; -- prep*:SunOS:5.*:*) -- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` -- exit ;; -- *:GNU:*:*) -- # the GNU system -- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` -- exit ;; -- *:GNU/*:*:*) -- # other systems with GNU libc and userland -- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu -- exit ;; -- i*86:Minix:*:*) -- echo ${UNAME_MACHINE}-pc-minix -- exit ;; -- alpha:Linux:*:*) -- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in -- EV5) UNAME_MACHINE=alphaev5 ;; -- EV56) UNAME_MACHINE=alphaev56 ;; -- PCA56) UNAME_MACHINE=alphapca56 ;; -- PCA57) UNAME_MACHINE=alphapca56 ;; -- EV6) UNAME_MACHINE=alphaev6 ;; -- EV67) UNAME_MACHINE=alphaev67 ;; -- EV68*) UNAME_MACHINE=alphaev68 ;; -- esac -- objdump --private-headers /bin/sh | grep -q ld.so.1 -- if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi -- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} -- exit ;; -- arm*:Linux:*:*) -- eval $set_cc_for_build -- if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ -- | grep -q __ARM_EABI__ -- then -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- else -- echo ${UNAME_MACHINE}-unknown-linux-gnueabi -- fi -- exit ;; -- avr32*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- cris:Linux:*:*) -- echo cris-axis-linux-gnu -- exit ;; -- crisv32:Linux:*:*) -- echo crisv32-axis-linux-gnu -- exit ;; -- frv:Linux:*:*) -- echo frv-unknown-linux-gnu -- exit ;; -- i*86:Linux:*:*) -- LIBC=gnu -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c -- #ifdef __dietlibc__ -- LIBC=dietlibc -- #endif --EOF -- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` -- echo "${UNAME_MACHINE}-pc-linux-${LIBC}" -- exit ;; -- ia64:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- m32r*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- m68*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- mips:Linux:*:* | mips64:Linux:*:*) -- eval $set_cc_for_build -- sed 's/^ //' << EOF >$dummy.c -- #undef CPU -- #undef ${UNAME_MACHINE} -- #undef ${UNAME_MACHINE}el -- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) -- CPU=${UNAME_MACHINE}el -- #else -- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) -- CPU=${UNAME_MACHINE} -- #else -- CPU= -- #endif -- #endif --EOF -- eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` -- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } -- ;; -- or32:Linux:*:*) -- echo or32-unknown-linux-gnu -- exit ;; -- padre:Linux:*:*) -- echo sparc-unknown-linux-gnu -- exit ;; -- parisc64:Linux:*:* | hppa64:Linux:*:*) -- echo hppa64-unknown-linux-gnu -- exit ;; -- parisc:Linux:*:* | hppa:Linux:*:*) -- # Look for CPU level -- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in -- PA7*) echo hppa1.1-unknown-linux-gnu ;; -- PA8*) echo hppa2.0-unknown-linux-gnu ;; -- *) echo hppa-unknown-linux-gnu ;; -- esac -- exit ;; -- ppc64:Linux:*:*) -- echo powerpc64-unknown-linux-gnu -- exit ;; -- ppc:Linux:*:*) -- echo powerpc-unknown-linux-gnu -- exit ;; -- s390:Linux:*:* | s390x:Linux:*:*) -- echo ${UNAME_MACHINE}-ibm-linux -- exit ;; -- sh64*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- sh*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- sparc:Linux:*:* | sparc64:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- vax:Linux:*:*) -- echo ${UNAME_MACHINE}-dec-linux-gnu -- exit ;; -- x86_64:Linux:*:*) -- echo x86_64-unknown-linux-gnu -- exit ;; -- xtensa*:Linux:*:*) -- echo ${UNAME_MACHINE}-unknown-linux-gnu -- exit ;; -- i*86:DYNIX/ptx:4*:*) -- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. -- # earlier versions are messed up and put the nodename in both -- # sysname and nodename. -- echo i386-sequent-sysv4 -- exit ;; -- i*86:UNIX_SV:4.2MP:2.*) -- # Unixware is an offshoot of SVR4, but it has its own version -- # number series starting with 2... -- # I am not positive that other SVR4 systems won't match this, -- # I just have to hope. -- rms. -- # Use sysv4.2uw... so that sysv4* matches it. -- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} -- exit ;; -- i*86:OS/2:*:*) -- # If we were able to find `uname', then EMX Unix compatibility -- # is probably installed. 
-- echo ${UNAME_MACHINE}-pc-os2-emx -- exit ;; -- i*86:XTS-300:*:STOP) -- echo ${UNAME_MACHINE}-unknown-stop -- exit ;; -- i*86:atheos:*:*) -- echo ${UNAME_MACHINE}-unknown-atheos -- exit ;; -- i*86:syllable:*:*) -- echo ${UNAME_MACHINE}-pc-syllable -- exit ;; -- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) -- echo i386-unknown-lynxos${UNAME_RELEASE} -- exit ;; -- i*86:*DOS:*:*) -- echo ${UNAME_MACHINE}-pc-msdosdjgpp -- exit ;; -- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) -- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` -- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then -- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} -- else -- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} -- fi -- exit ;; -- i*86:*:5:[678]*) -- # UnixWare 7.x, OpenUNIX and OpenServer 6. -- case `/bin/uname -X | grep "^Machine"` in -- *486*) UNAME_MACHINE=i486 ;; -- *Pentium) UNAME_MACHINE=i586 ;; -- *Pent*|*Celeron) UNAME_MACHINE=i686 ;; -- esac -- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} -- exit ;; -- i*86:*:3.2:*) -- if test -f /usr/options/cb.name; then -- UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then -- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` -- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 -- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ -- && UNAME_MACHINE=i586 -- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ -- && UNAME_MACHINE=i686 -- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ -- && UNAME_MACHINE=i686 -- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL -- else -- echo ${UNAME_MACHINE}-pc-sysv32 -- fi -- exit ;; -- pc:*:*:*) -- # Left here for compatibility: -- # uname -m prints for DJGPP always 'pc', but it prints nothing about -- # the processor, so we play safe by assuming i586. -- # Note: whatever this is, it MUST be the same as what config.sub -- # prints for the "djgpp" host, or else GDB configury will decide that -- # this is a cross-build. -- echo i586-pc-msdosdjgpp -- exit ;; -- Intel:Mach:3*:*) -- echo i386-pc-mach3 -- exit ;; -- paragon:*:*:*) -- echo i860-intel-osf1 -- exit ;; -- i860:*:4.*:*) # i860-SVR4 -- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then -- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 -- else # Add other i860-SVR4 vendors below as they are discovered. 
-- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 -- fi -- exit ;; -- mini*:CTIX:SYS*5:*) -- # "miniframe" -- echo m68010-convergent-sysv -- exit ;; -- mc68k:UNIX:SYSTEM5:3.51m) -- echo m68k-convergent-sysv -- exit ;; -- M680?0:D-NIX:5.3:*) -- echo m68k-diab-dnix -- exit ;; -- M68*:*:R3V[5678]*:*) -- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; -- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) -- OS_REL='' -- test -r /etc/.relid \ -- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` -- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ -- && { echo i486-ncr-sysv4.3${OS_REL}; exit; } -- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ -- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; -- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) -- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ -- && { echo i486-ncr-sysv4; exit; } ;; -- NCR*:*:4.2:* | MPRAS*:*:4.2:*) -- OS_REL='.3' -- test -r /etc/.relid \ -- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` -- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ -- && { echo i486-ncr-sysv4.3${OS_REL}; exit; } -- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ -- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } -- /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ -- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; -- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) -- echo m68k-unknown-lynxos${UNAME_RELEASE} -- exit ;; -- mc68030:UNIX_System_V:4.*:*) -- echo m68k-atari-sysv4 -- exit ;; -- TSUNAMI:LynxOS:2.*:*) -- echo sparc-unknown-lynxos${UNAME_RELEASE} -- exit ;; -- rs6000:LynxOS:2.*:*) -- echo rs6000-unknown-lynxos${UNAME_RELEASE} -- exit ;; -- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) -- echo powerpc-unknown-lynxos${UNAME_RELEASE} -- exit ;; -- SM[BE]S:UNIX_SV:*:*) -- echo mips-dde-sysv${UNAME_RELEASE} -- exit ;; -- RM*:ReliantUNIX-*:*:*) -- echo mips-sni-sysv4 -- exit ;; -- RM*:SINIX-*:*:*) -- echo mips-sni-sysv4 -- exit ;; -- *:SINIX-*:*:*) -- if uname -p 2>/dev/null >/dev/null ; then -- UNAME_MACHINE=`(uname -p) 2>/dev/null` -- echo ${UNAME_MACHINE}-sni-sysv4 -- else -- echo ns32k-sni-sysv -- fi -- exit ;; -- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort -- # says -- echo i586-unisys-sysv4 -- exit ;; -- *:UNIX_System_V:4*:FTX*) -- # From Gerald Hewes . -- # How about differentiating between stratus architectures? -djm -- echo hppa1.1-stratus-sysv4 -- exit ;; -- *:*:*:FTX*) -- # From seanf@swdc.stratus.com. -- echo i860-stratus-sysv4 -- exit ;; -- i*86:VOS:*:*) -- # From Paul.Green@stratus.com. -- echo ${UNAME_MACHINE}-stratus-vos -- exit ;; -- *:VOS:*:*) -- # From Paul.Green@stratus.com. -- echo hppa1.1-stratus-vos -- exit ;; -- mc68*:A/UX:*:*) -- echo m68k-apple-aux${UNAME_RELEASE} -- exit ;; -- news*:NEWS-OS:6*:*) -- echo mips-sony-newsos6 -- exit ;; -- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) -- if [ -d /usr/nec ]; then -- echo mips-nec-sysv${UNAME_RELEASE} -- else -- echo mips-unknown-sysv${UNAME_RELEASE} -- fi -- exit ;; -- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. -- echo powerpc-be-beos -- exit ;; -- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. -- echo powerpc-apple-beos -- exit ;; -- BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
-- echo i586-pc-beos -- exit ;; -- BePC:Haiku:*:*) # Haiku running on Intel PC compatible. -- echo i586-pc-haiku -- exit ;; -- SX-4:SUPER-UX:*:*) -- echo sx4-nec-superux${UNAME_RELEASE} -- exit ;; -- SX-5:SUPER-UX:*:*) -- echo sx5-nec-superux${UNAME_RELEASE} -- exit ;; -- SX-6:SUPER-UX:*:*) -- echo sx6-nec-superux${UNAME_RELEASE} -- exit ;; -- SX-7:SUPER-UX:*:*) -- echo sx7-nec-superux${UNAME_RELEASE} -- exit ;; -- SX-8:SUPER-UX:*:*) -- echo sx8-nec-superux${UNAME_RELEASE} -- exit ;; -- SX-8R:SUPER-UX:*:*) -- echo sx8r-nec-superux${UNAME_RELEASE} -- exit ;; -- Power*:Rhapsody:*:*) -- echo powerpc-apple-rhapsody${UNAME_RELEASE} -- exit ;; -- *:Rhapsody:*:*) -- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} -- exit ;; -- *:Darwin:*:*) -- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown -- case $UNAME_PROCESSOR in -- i386) -- eval $set_cc_for_build -- if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then -- if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ -- (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ -- grep IS_64BIT_ARCH >/dev/null -- then -- UNAME_PROCESSOR="x86_64" -- fi -- fi ;; -- unknown) UNAME_PROCESSOR=powerpc ;; -- esac -- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} -- exit ;; -- *:procnto*:*:* | *:QNX:[0123456789]*:*) -- UNAME_PROCESSOR=`uname -p` -- if test "$UNAME_PROCESSOR" = "x86"; then -- UNAME_PROCESSOR=i386 -- UNAME_MACHINE=pc -- fi -- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} -- exit ;; -- *:QNX:*:4*) -- echo i386-pc-qnx -- exit ;; -- NSE-?:NONSTOP_KERNEL:*:*) -- echo nse-tandem-nsk${UNAME_RELEASE} -- exit ;; -- NSR-?:NONSTOP_KERNEL:*:*) -- echo nsr-tandem-nsk${UNAME_RELEASE} -- exit ;; -- *:NonStop-UX:*:*) -- echo mips-compaq-nonstopux -- exit ;; -- BS2000:POSIX*:*:*) -- echo bs2000-siemens-sysv -- exit ;; -- DS/*:UNIX_System_V:*:*) -- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} -- exit ;; -- *:Plan9:*:*) -- # "uname -m" is not consistent, so use $cputype instead. 386 -- # is converted to i386 for consistency with other x86 -- # operating systems. 
-- if test "$cputype" = "386"; then -- UNAME_MACHINE=i386 -- else -- UNAME_MACHINE="$cputype" -- fi -- echo ${UNAME_MACHINE}-unknown-plan9 -- exit ;; -- *:TOPS-10:*:*) -- echo pdp10-unknown-tops10 -- exit ;; -- *:TENEX:*:*) -- echo pdp10-unknown-tenex -- exit ;; -- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) -- echo pdp10-dec-tops20 -- exit ;; -- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) -- echo pdp10-xkl-tops20 -- exit ;; -- *:TOPS-20:*:*) -- echo pdp10-unknown-tops20 -- exit ;; -- *:ITS:*:*) -- echo pdp10-unknown-its -- exit ;; -- SEI:*:*:SEIUX) -- echo mips-sei-seiux${UNAME_RELEASE} -- exit ;; -- *:DragonFly:*:*) -- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` -- exit ;; -- *:*VMS:*:*) -- UNAME_MACHINE=`(uname -p) 2>/dev/null` -- case "${UNAME_MACHINE}" in -- A*) echo alpha-dec-vms ; exit ;; -- I*) echo ia64-dec-vms ; exit ;; -- V*) echo vax-dec-vms ; exit ;; -- esac ;; -- *:XENIX:*:SysV) -- echo i386-pc-xenix -- exit ;; -- i*86:skyos:*:*) -- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' -- exit ;; -- i*86:rdos:*:*) -- echo ${UNAME_MACHINE}-pc-rdos -- exit ;; -- i*86:AROS:*:*) -- echo ${UNAME_MACHINE}-pc-aros -- exit ;; --esac -- --#echo '(No uname command or uname output not recognized.)' 1>&2 --#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 -- --eval $set_cc_for_build --cat >$dummy.c < --# include --#endif --main () --{ --#if defined (sony) --#if defined (MIPSEB) -- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed, -- I don't know.... */ -- printf ("mips-sony-bsd\n"); exit (0); --#else --#include -- printf ("m68k-sony-newsos%s\n", --#ifdef NEWSOS4 -- "4" --#else -- "" --#endif -- ); exit (0); --#endif --#endif -- --#if defined (__arm) && defined (__acorn) && defined (__unix) -- printf ("arm-acorn-riscix\n"); exit (0); --#endif -- --#if defined (hp300) && !defined (hpux) -- printf ("m68k-hp-bsd\n"); exit (0); --#endif -- --#if defined (NeXT) --#if !defined (__ARCHITECTURE__) --#define __ARCHITECTURE__ "m68k" --#endif -- int version; -- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; -- if (version < 4) -- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); -- else -- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); -- exit (0); --#endif -- --#if defined (MULTIMAX) || defined (n16) --#if defined (UMAXV) -- printf ("ns32k-encore-sysv\n"); exit (0); --#else --#if defined (CMU) -- printf ("ns32k-encore-mach\n"); exit (0); --#else -- printf ("ns32k-encore-bsd\n"); exit (0); --#endif --#endif --#endif -- --#if defined (__386BSD__) -- printf ("i386-pc-bsd\n"); exit (0); --#endif -- --#if defined (sequent) --#if defined (i386) -- printf ("i386-sequent-dynix\n"); exit (0); --#endif --#if defined (ns32000) -- printf ("ns32k-sequent-dynix\n"); exit (0); --#endif --#endif -- --#if defined (_SEQUENT_) -- struct utsname un; -- -- uname(&un); -- -- if (strncmp(un.version, "V2", 2) == 0) { -- printf ("i386-sequent-ptx2\n"); exit (0); -- } -- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? 
*/ -- printf ("i386-sequent-ptx1\n"); exit (0); -- } -- printf ("i386-sequent-ptx\n"); exit (0); -- --#endif -- --#if defined (vax) --# if !defined (ultrix) --# include --# if defined (BSD) --# if BSD == 43 -- printf ("vax-dec-bsd4.3\n"); exit (0); --# else --# if BSD == 199006 -- printf ("vax-dec-bsd4.3reno\n"); exit (0); --# else -- printf ("vax-dec-bsd\n"); exit (0); --# endif --# endif --# else -- printf ("vax-dec-bsd\n"); exit (0); --# endif --# else -- printf ("vax-dec-ultrix\n"); exit (0); --# endif --#endif -- --#if defined (alliant) && defined (i860) -- printf ("i860-alliant-bsd\n"); exit (0); --#endif -- -- exit (1); --} --EOF -- --$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && -- { echo "$SYSTEM_NAME"; exit; } -- --# Apollos put the system type in the environment. -- --test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } -- --# Convex versions that predate uname can use getsysinfo(1) -- --if [ -x /usr/convex/getsysinfo ] --then -- case `getsysinfo -f cpu_type` in -- c1*) -- echo c1-convex-bsd -- exit ;; -- c2*) -- if getsysinfo -f scalar_acc -- then echo c32-convex-bsd -- else echo c2-convex-bsd -- fi -- exit ;; -- c34*) -- echo c34-convex-bsd -- exit ;; -- c38*) -- echo c38-convex-bsd -- exit ;; -- c4*) -- echo c4-convex-bsd -- exit ;; -- esac --fi -- --cat >&2 < in order to provide the needed --information to handle your system. -- --config.guess timestamp = $timestamp -- --uname -m = `(uname -m) 2>/dev/null || echo unknown` --uname -r = `(uname -r) 2>/dev/null || echo unknown` --uname -s = `(uname -s) 2>/dev/null || echo unknown` --uname -v = `(uname -v) 2>/dev/null || echo unknown` -- --/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` --/bin/uname -X = `(/bin/uname -X) 2>/dev/null` -- --hostinfo = `(hostinfo) 2>/dev/null` --/bin/universe = `(/bin/universe) 2>/dev/null` --/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` --/bin/arch = `(/bin/arch) 2>/dev/null` --/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` --/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` -- --UNAME_MACHINE = ${UNAME_MACHINE} --UNAME_RELEASE = ${UNAME_RELEASE} --UNAME_SYSTEM = ${UNAME_SYSTEM} --UNAME_VERSION = ${UNAME_VERSION} --EOF -- --exit 1 -- --# Local variables: --# eval: (add-hook 'write-file-hooks 'time-stamp) --# time-stamp-start: "timestamp='" --# time-stamp-format: "%:y-%02m-%02d" --# time-stamp-end: "'" --# End: ---- a/libraries/libapparmor/config.sub -+++ /dev/null -@@ -1,1714 +0,0 @@ --#! /bin/sh --# Configuration validation subroutine script. --# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, --# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 --# Free Software Foundation, Inc. -- --timestamp='2010-01-22' -- --# This file is (in principle) common to ALL GNU software. --# The presence of a machine in this file suggests that SOME GNU software --# can handle that machine. It does not imply ALL GNU software can. --# --# This file is free software; you can redistribute it and/or modify --# it under the terms of the GNU General Public License as published by --# the Free Software Foundation; either version 2 of the License, or --# (at your option) any later version. --# --# This program is distributed in the hope that it will be useful, --# but WITHOUT ANY WARRANTY; without even the implied warranty of --# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the --# GNU General Public License for more details. 
--# --# You should have received a copy of the GNU General Public License --# along with this program; if not, write to the Free Software --# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA --# 02110-1301, USA. --# --# As a special exception to the GNU General Public License, if you --# distribute this file as part of a program that contains a --# configuration script generated by Autoconf, you may include it under --# the same distribution terms that you use for the rest of that program. -- -- --# Please send patches to . Submit a context --# diff and a properly formatted GNU ChangeLog entry. --# --# Configuration subroutine to validate and canonicalize a configuration type. --# Supply the specified configuration type as an argument. --# If it is invalid, we print an error message on stderr and exit with code 1. --# Otherwise, we print the canonical config type on stdout and succeed. -- --# You can get the latest version of this script from: --# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD -- --# This file is supposed to be the same for all GNU packages --# and recognize all the CPU types, system types and aliases --# that are meaningful with *any* GNU software. --# Each package is responsible for reporting which valid configurations --# it does not support. The user should be able to distinguish --# a failure to support a valid configuration from a meaningless --# configuration. -- --# The goal of this file is to map all the various variations of a given --# machine specification into a single specification in the form: --# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM --# or in some cases, the newer four-part form: --# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM --# It is wrong to echo any other type of specification. -- --me=`echo "$0" | sed -e 's,.*/,,'` -- --usage="\ --Usage: $0 [OPTION] CPU-MFR-OPSYS -- $0 [OPTION] ALIAS -- --Canonicalize a configuration name. -- --Operation modes: -- -h, --help print this help, then exit -- -t, --time-stamp print date of last modification, then exit -- -v, --version print version number, then exit -- --Report bugs and patches to ." -- --version="\ --GNU config.sub ($timestamp) -- --Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, --2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free --Software Foundation, Inc. -- --This is free software; see the source for copying conditions. There is NO --warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." -- --help=" --Try \`$me --help' for more information." -- --# Parse command line --while test $# -gt 0 ; do -- case $1 in -- --time-stamp | --time* | -t ) -- echo "$timestamp" ; exit ;; -- --version | -v ) -- echo "$version" ; exit ;; -- --help | --h* | -h ) -- echo "$usage"; exit ;; -- -- ) # Stop option processing -- shift; break ;; -- - ) # Use stdin as input. -- break ;; -- -* ) -- echo "$me: invalid option $1$help" -- exit 1 ;; -- -- *local*) -- # First pass through any local machine types. -- echo $1 -- exit ;; -- -- * ) -- break ;; -- esac --done -- --case $# in -- 0) echo "$me: missing argument$help" >&2 -- exit 1;; -- 1) ;; -- *) echo "$me: too many arguments$help" >&2 -- exit 1;; --esac -- --# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). --# Here we must recognize all the valid KERNEL-OS combinations. 
--maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` --case $maybe_os in -- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \ -- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \ -- kopensolaris*-gnu* | \ -- storm-chaos* | os2-emx* | rtmk-nova*) -- os=-$maybe_os -- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` -- ;; -- *) -- basic_machine=`echo $1 | sed 's/-[^-]*$//'` -- if [ $basic_machine != $1 ] -- then os=`echo $1 | sed 's/.*-/-/'` -- else os=; fi -- ;; --esac -- --### Let's recognize common machines as not being operating systems so --### that things like config.sub decstation-3100 work. We also --### recognize some manufacturers as not being operating systems, so we --### can provide default operating systems below. --case $os in -- -sun*os*) -- # Prevent following clause from handling this invalid input. -- ;; -- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ -- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ -- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ -- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ -- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ -- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ -- -apple | -axis | -knuth | -cray | -microblaze) -- os= -- basic_machine=$1 -- ;; -- -bluegene*) -- os=-cnk -- ;; -- -sim | -cisco | -oki | -wec | -winbond) -- os= -- basic_machine=$1 -- ;; -- -scout) -- ;; -- -wrs) -- os=-vxworks -- basic_machine=$1 -- ;; -- -chorusos*) -- os=-chorusos -- basic_machine=$1 -- ;; -- -chorusrdb) -- os=-chorusrdb -- basic_machine=$1 -- ;; -- -hiux*) -- os=-hiuxwe2 -- ;; -- -sco6) -- os=-sco5v6 -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco5) -- os=-sco3.2v5 -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco4) -- os=-sco3.2v4 -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco3.2.[4-9]*) -- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco3.2v[4-9]*) -- # Don't forget version if it is 3.2v4 or newer. -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco5v6*) -- # Don't forget version if it is 3.2v4 or newer. -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -sco*) -- os=-sco3.2v2 -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -udk*) -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -isc) -- os=-isc2.2 -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -clix*) -- basic_machine=clipper-intergraph -- ;; -- -isc*) -- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` -- ;; -- -lynx*) -- os=-lynxos -- ;; -- -ptx*) -- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` -- ;; -- -windowsnt*) -- os=`echo $os | sed -e 's/windowsnt/winnt/'` -- ;; -- -psos*) -- os=-psos -- ;; -- -mint | -mint[0-9]*) -- basic_machine=m68k-atari -- os=-mint -- ;; --esac -- --# Decode aliases for certain CPU-COMPANY combinations. --case $basic_machine in -- # Recognize the basic CPU types without company name. -- # Some are omitted here because they have special meanings below. 
-- 1750a | 580 \ -- | a29k \ -- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ -- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ -- | am33_2.0 \ -- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ -- | bfin \ -- | c4x | clipper \ -- | d10v | d30v | dlx | dsp16xx \ -- | fido | fr30 | frv \ -- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ -- | i370 | i860 | i960 | ia64 \ -- | ip2k | iq2000 \ -- | lm32 \ -- | m32c | m32r | m32rle | m68000 | m68k | m88k \ -- | maxq | mb | microblaze | mcore | mep | metag \ -- | mips | mipsbe | mipseb | mipsel | mipsle \ -- | mips16 \ -- | mips64 | mips64el \ -- | mips64octeon | mips64octeonel \ -- | mips64orion | mips64orionel \ -- | mips64r5900 | mips64r5900el \ -- | mips64vr | mips64vrel \ -- | mips64vr4100 | mips64vr4100el \ -- | mips64vr4300 | mips64vr4300el \ -- | mips64vr5000 | mips64vr5000el \ -- | mips64vr5900 | mips64vr5900el \ -- | mipsisa32 | mipsisa32el \ -- | mipsisa32r2 | mipsisa32r2el \ -- | mipsisa64 | mipsisa64el \ -- | mipsisa64r2 | mipsisa64r2el \ -- | mipsisa64sb1 | mipsisa64sb1el \ -- | mipsisa64sr71k | mipsisa64sr71kel \ -- | mipstx39 | mipstx39el \ -- | mn10200 | mn10300 \ -- | moxie \ -- | mt \ -- | msp430 \ -- | nios | nios2 \ -- | ns16k | ns32k \ -- | or32 \ -- | pdp10 | pdp11 | pj | pjl \ -- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \ -- | pyramid \ -- | rx \ -- | score \ -- | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ -- | sh64 | sh64le \ -- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ -- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ -- | spu | strongarm \ -- | tahoe | thumb | tic4x | tic80 | tron \ -- | ubicom32 \ -- | v850 | v850e \ -- | we32k \ -- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \ -- | z8k | z80) -- basic_machine=$basic_machine-unknown -- ;; -- m6811 | m68hc11 | m6812 | m68hc12 | picochip) -- # Motorola 68HC11/12. -- basic_machine=$basic_machine-unknown -- os=-none -- ;; -- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) -- ;; -- ms1) -- basic_machine=mt-unknown -- ;; -- -- # We use `pc' rather than `unknown' -- # because (1) that's what they normally are, and -- # (2) the word "unknown" tends to confuse beginning users. -- i*86 | x86_64) -- basic_machine=$basic_machine-pc -- ;; -- # Object if more than one company name word. -- *-*-*) -- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 -- exit 1 -- ;; -- # Recognize the basic CPU types with company name. 
-- 580-* \ -- | a29k-* \ -- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ -- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ -- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ -- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ -- | avr-* | avr32-* \ -- | bfin-* | bs2000-* \ -- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \ -- | clipper-* | craynv-* | cydra-* \ -- | d10v-* | d30v-* | dlx-* \ -- | elxsi-* \ -- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ -- | h8300-* | h8500-* \ -- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ -- | i*86-* | i860-* | i960-* | ia64-* \ -- | ip2k-* | iq2000-* \ -- | lm32-* \ -- | m32c-* | m32r-* | m32rle-* \ -- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ -- | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ -- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ -- | mips16-* \ -- | mips64-* | mips64el-* \ -- | mips64octeon-* | mips64octeonel-* \ -- | mips64orion-* | mips64orionel-* \ -- | mips64r5900-* | mips64r5900el-* \ -- | mips64vr-* | mips64vrel-* \ -- | mips64vr4100-* | mips64vr4100el-* \ -- | mips64vr4300-* | mips64vr4300el-* \ -- | mips64vr5000-* | mips64vr5000el-* \ -- | mips64vr5900-* | mips64vr5900el-* \ -- | mipsisa32-* | mipsisa32el-* \ -- | mipsisa32r2-* | mipsisa32r2el-* \ -- | mipsisa64-* | mipsisa64el-* \ -- | mipsisa64r2-* | mipsisa64r2el-* \ -- | mipsisa64sb1-* | mipsisa64sb1el-* \ -- | mipsisa64sr71k-* | mipsisa64sr71kel-* \ -- | mipstx39-* | mipstx39el-* \ -- | mmix-* \ -- | mt-* \ -- | msp430-* \ -- | nios-* | nios2-* \ -- | none-* | np1-* | ns16k-* | ns32k-* \ -- | orion-* \ -- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ -- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \ -- | pyramid-* \ -- | romp-* | rs6000-* | rx-* \ -- | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ -- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ -- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ -- | sparclite-* \ -- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \ -- | tahoe-* | thumb-* \ -- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ -- | tile-* | tilegx-* \ -- | tron-* \ -- | ubicom32-* \ -- | v850-* | v850e-* | vax-* \ -- | we32k-* \ -- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \ -- | xstormy16-* | xtensa*-* \ -- | ymp-* \ -- | z8k-* | z80-*) -- ;; -- # Recognize the basic CPU types without company name, with glob match. -- xtensa*) -- basic_machine=$basic_machine-unknown -- ;; -- # Recognize the various machine names and aliases which stand -- # for a CPU type and a company and sometimes even an OS. 
-- 386bsd) -- basic_machine=i386-unknown -- os=-bsd -- ;; -- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) -- basic_machine=m68000-att -- ;; -- 3b*) -- basic_machine=we32k-att -- ;; -- a29khif) -- basic_machine=a29k-amd -- os=-udi -- ;; -- abacus) -- basic_machine=abacus-unknown -- ;; -- adobe68k) -- basic_machine=m68010-adobe -- os=-scout -- ;; -- alliant | fx80) -- basic_machine=fx80-alliant -- ;; -- altos | altos3068) -- basic_machine=m68k-altos -- ;; -- am29k) -- basic_machine=a29k-none -- os=-bsd -- ;; -- amd64) -- basic_machine=x86_64-pc -- ;; -- amd64-*) -- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- amdahl) -- basic_machine=580-amdahl -- os=-sysv -- ;; -- amiga | amiga-*) -- basic_machine=m68k-unknown -- ;; -- amigaos | amigados) -- basic_machine=m68k-unknown -- os=-amigaos -- ;; -- amigaunix | amix) -- basic_machine=m68k-unknown -- os=-sysv4 -- ;; -- apollo68) -- basic_machine=m68k-apollo -- os=-sysv -- ;; -- apollo68bsd) -- basic_machine=m68k-apollo -- os=-bsd -- ;; -- aros) -- basic_machine=i386-pc -- os=-aros -- ;; -- aux) -- basic_machine=m68k-apple -- os=-aux -- ;; -- balance) -- basic_machine=ns32k-sequent -- os=-dynix -- ;; -- blackfin) -- basic_machine=bfin-unknown -- os=-linux -- ;; -- blackfin-*) -- basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` -- os=-linux -- ;; -- bluegene*) -- basic_machine=powerpc-ibm -- os=-cnk -- ;; -- c90) -- basic_machine=c90-cray -- os=-unicos -- ;; -- cegcc) -- basic_machine=arm-unknown -- os=-cegcc -- ;; -- convex-c1) -- basic_machine=c1-convex -- os=-bsd -- ;; -- convex-c2) -- basic_machine=c2-convex -- os=-bsd -- ;; -- convex-c32) -- basic_machine=c32-convex -- os=-bsd -- ;; -- convex-c34) -- basic_machine=c34-convex -- os=-bsd -- ;; -- convex-c38) -- basic_machine=c38-convex -- os=-bsd -- ;; -- cray | j90) -- basic_machine=j90-cray -- os=-unicos -- ;; -- craynv) -- basic_machine=craynv-cray -- os=-unicosmp -- ;; -- cr16) -- basic_machine=cr16-unknown -- os=-elf -- ;; -- crds | unos) -- basic_machine=m68k-crds -- ;; -- crisv32 | crisv32-* | etraxfs*) -- basic_machine=crisv32-axis -- ;; -- cris | cris-* | etrax*) -- basic_machine=cris-axis -- ;; -- crx) -- basic_machine=crx-unknown -- os=-elf -- ;; -- da30 | da30-*) -- basic_machine=m68k-da30 -- ;; -- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) -- basic_machine=mips-dec -- ;; -- decsystem10* | dec10*) -- basic_machine=pdp10-dec -- os=-tops10 -- ;; -- decsystem20* | dec20*) -- basic_machine=pdp10-dec -- os=-tops20 -- ;; -- delta | 3300 | motorola-3300 | motorola-delta \ -- | 3300-motorola | delta-motorola) -- basic_machine=m68k-motorola -- ;; -- delta88) -- basic_machine=m88k-motorola -- os=-sysv3 -- ;; -- dicos) -- basic_machine=i686-pc -- os=-dicos -- ;; -- djgpp) -- basic_machine=i586-pc -- os=-msdosdjgpp -- ;; -- dpx20 | dpx20-*) -- basic_machine=rs6000-bull -- os=-bosx -- ;; -- dpx2* | dpx2*-bull) -- basic_machine=m68k-bull -- os=-sysv3 -- ;; -- ebmon29k) -- basic_machine=a29k-amd -- os=-ebmon -- ;; -- elxsi) -- basic_machine=elxsi-elxsi -- os=-bsd -- ;; -- encore | umax | mmax) -- basic_machine=ns32k-encore -- ;; -- es1800 | OSE68k | ose68k | ose | OSE) -- basic_machine=m68k-ericsson -- os=-ose -- ;; -- fx2800) -- basic_machine=i860-alliant -- ;; -- genix) -- basic_machine=ns32k-ns -- ;; -- gmicro) -- basic_machine=tron-gmicro -- os=-sysv -- ;; -- go32) -- basic_machine=i386-pc -- os=-go32 -- ;; -- h3050r* | hiux*) -- basic_machine=hppa1.1-hitachi -- os=-hiuxwe2 -- ;; -- h8300hms) -- 
basic_machine=h8300-hitachi -- os=-hms -- ;; -- h8300xray) -- basic_machine=h8300-hitachi -- os=-xray -- ;; -- h8500hms) -- basic_machine=h8500-hitachi -- os=-hms -- ;; -- harris) -- basic_machine=m88k-harris -- os=-sysv3 -- ;; -- hp300-*) -- basic_machine=m68k-hp -- ;; -- hp300bsd) -- basic_machine=m68k-hp -- os=-bsd -- ;; -- hp300hpux) -- basic_machine=m68k-hp -- os=-hpux -- ;; -- hp3k9[0-9][0-9] | hp9[0-9][0-9]) -- basic_machine=hppa1.0-hp -- ;; -- hp9k2[0-9][0-9] | hp9k31[0-9]) -- basic_machine=m68000-hp -- ;; -- hp9k3[2-9][0-9]) -- basic_machine=m68k-hp -- ;; -- hp9k6[0-9][0-9] | hp6[0-9][0-9]) -- basic_machine=hppa1.0-hp -- ;; -- hp9k7[0-79][0-9] | hp7[0-79][0-9]) -- basic_machine=hppa1.1-hp -- ;; -- hp9k78[0-9] | hp78[0-9]) -- # FIXME: really hppa2.0-hp -- basic_machine=hppa1.1-hp -- ;; -- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) -- # FIXME: really hppa2.0-hp -- basic_machine=hppa1.1-hp -- ;; -- hp9k8[0-9][13679] | hp8[0-9][13679]) -- basic_machine=hppa1.1-hp -- ;; -- hp9k8[0-9][0-9] | hp8[0-9][0-9]) -- basic_machine=hppa1.0-hp -- ;; -- hppa-next) -- os=-nextstep3 -- ;; -- hppaosf) -- basic_machine=hppa1.1-hp -- os=-osf -- ;; -- hppro) -- basic_machine=hppa1.1-hp -- os=-proelf -- ;; -- i370-ibm* | ibm*) -- basic_machine=i370-ibm -- ;; --# I'm not sure what "Sysv32" means. Should this be sysv3.2? -- i*86v32) -- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` -- os=-sysv32 -- ;; -- i*86v4*) -- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` -- os=-sysv4 -- ;; -- i*86v) -- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` -- os=-sysv -- ;; -- i*86sol2) -- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` -- os=-solaris2 -- ;; -- i386mach) -- basic_machine=i386-mach -- os=-mach -- ;; -- i386-vsta | vsta) -- basic_machine=i386-unknown -- os=-vsta -- ;; -- iris | iris4d) -- basic_machine=mips-sgi -- case $os in -- -irix*) -- ;; -- *) -- os=-irix4 -- ;; -- esac -- ;; -- isi68 | isi) -- basic_machine=m68k-isi -- os=-sysv -- ;; -- m68knommu) -- basic_machine=m68k-unknown -- os=-linux -- ;; -- m68knommu-*) -- basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` -- os=-linux -- ;; -- m88k-omron*) -- basic_machine=m88k-omron -- ;; -- magnum | m3230) -- basic_machine=mips-mips -- os=-sysv -- ;; -- merlin) -- basic_machine=ns32k-utek -- os=-sysv -- ;; -- microblaze) -- basic_machine=microblaze-xilinx -- ;; -- mingw32) -- basic_machine=i386-pc -- os=-mingw32 -- ;; -- mingw32ce) -- basic_machine=arm-unknown -- os=-mingw32ce -- ;; -- miniframe) -- basic_machine=m68000-convergent -- ;; -- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) -- basic_machine=m68k-atari -- os=-mint -- ;; -- mips3*-*) -- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` -- ;; -- mips3*) -- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown -- ;; -- monitor) -- basic_machine=m68k-rom68k -- os=-coff -- ;; -- morphos) -- basic_machine=powerpc-unknown -- os=-morphos -- ;; -- msdos) -- basic_machine=i386-pc -- os=-msdos -- ;; -- ms1-*) -- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` -- ;; -- mvs) -- basic_machine=i370-ibm -- os=-mvs -- ;; -- ncr3000) -- basic_machine=i486-ncr -- os=-sysv4 -- ;; -- netbsd386) -- basic_machine=i386-unknown -- os=-netbsd -- ;; -- netwinder) -- basic_machine=armv4l-rebel -- os=-linux -- ;; -- news | news700 | news800 | news900) -- basic_machine=m68k-sony -- os=-newsos -- ;; -- news1000) -- basic_machine=m68030-sony -- os=-newsos -- ;; -- news-3600 | risc-news) -- basic_machine=mips-sony -- os=-newsos -- 
;; -- necv70) -- basic_machine=v70-nec -- os=-sysv -- ;; -- next | m*-next ) -- basic_machine=m68k-next -- case $os in -- -nextstep* ) -- ;; -- -ns2*) -- os=-nextstep2 -- ;; -- *) -- os=-nextstep3 -- ;; -- esac -- ;; -- nh3000) -- basic_machine=m68k-harris -- os=-cxux -- ;; -- nh[45]000) -- basic_machine=m88k-harris -- os=-cxux -- ;; -- nindy960) -- basic_machine=i960-intel -- os=-nindy -- ;; -- mon960) -- basic_machine=i960-intel -- os=-mon960 -- ;; -- nonstopux) -- basic_machine=mips-compaq -- os=-nonstopux -- ;; -- np1) -- basic_machine=np1-gould -- ;; -- nsr-tandem) -- basic_machine=nsr-tandem -- ;; -- op50n-* | op60c-*) -- basic_machine=hppa1.1-oki -- os=-proelf -- ;; -- openrisc | openrisc-*) -- basic_machine=or32-unknown -- ;; -- os400) -- basic_machine=powerpc-ibm -- os=-os400 -- ;; -- OSE68000 | ose68000) -- basic_machine=m68000-ericsson -- os=-ose -- ;; -- os68k) -- basic_machine=m68k-none -- os=-os68k -- ;; -- pa-hitachi) -- basic_machine=hppa1.1-hitachi -- os=-hiuxwe2 -- ;; -- paragon) -- basic_machine=i860-intel -- os=-osf -- ;; -- parisc) -- basic_machine=hppa-unknown -- os=-linux -- ;; -- parisc-*) -- basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` -- os=-linux -- ;; -- pbd) -- basic_machine=sparc-tti -- ;; -- pbb) -- basic_machine=m68k-tti -- ;; -- pc532 | pc532-*) -- basic_machine=ns32k-pc532 -- ;; -- pc98) -- basic_machine=i386-pc -- ;; -- pc98-*) -- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- pentium | p5 | k5 | k6 | nexgen | viac3) -- basic_machine=i586-pc -- ;; -- pentiumpro | p6 | 6x86 | athlon | athlon_*) -- basic_machine=i686-pc -- ;; -- pentiumii | pentium2 | pentiumiii | pentium3) -- basic_machine=i686-pc -- ;; -- pentium4) -- basic_machine=i786-pc -- ;; -- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) -- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- pentiumpro-* | p6-* | 6x86-* | athlon-*) -- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) -- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- pentium4-*) -- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- pn) -- basic_machine=pn-gould -- ;; -- power) basic_machine=power-ibm -- ;; -- ppc) basic_machine=powerpc-unknown -- ;; -- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- ppcle | powerpclittle | ppc-le | powerpc-little) -- basic_machine=powerpcle-unknown -- ;; -- ppcle-* | powerpclittle-*) -- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- ppc64) basic_machine=powerpc64-unknown -- ;; -- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- ppc64le | powerpc64little | ppc64-le | powerpc64-little) -- basic_machine=powerpc64le-unknown -- ;; -- ppc64le-* | powerpc64little-*) -- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` -- ;; -- ps2) -- basic_machine=i386-ibm -- ;; -- pw32) -- basic_machine=i586-unknown -- os=-pw32 -- ;; -- rdos) -- basic_machine=i386-pc -- os=-rdos -- ;; -- rom68k) -- basic_machine=m68k-rom68k -- os=-coff -- ;; -- rm[46]00) -- basic_machine=mips-siemens -- ;; -- rtpc | rtpc-*) -- basic_machine=romp-ibm -- ;; -- s390 | s390-*) -- basic_machine=s390-ibm -- ;; -- s390x | s390x-*) -- basic_machine=s390x-ibm -- ;; -- sa29200) -- basic_machine=a29k-amd -- os=-udi -- ;; -- sb1) -- basic_machine=mipsisa64sb1-unknown -- ;; -- sb1el) -- basic_machine=mipsisa64sb1el-unknown -- ;; -- sde) -- 
basic_machine=mipsisa32-sde -- os=-elf -- ;; -- sei) -- basic_machine=mips-sei -- os=-seiux -- ;; -- sequent) -- basic_machine=i386-sequent -- ;; -- sh) -- basic_machine=sh-hitachi -- os=-hms -- ;; -- sh5el) -- basic_machine=sh5le-unknown -- ;; -- sh64) -- basic_machine=sh64-unknown -- ;; -- sparclite-wrs | simso-wrs) -- basic_machine=sparclite-wrs -- os=-vxworks -- ;; -- sps7) -- basic_machine=m68k-bull -- os=-sysv2 -- ;; -- spur) -- basic_machine=spur-unknown -- ;; -- st2000) -- basic_machine=m68k-tandem -- ;; -- stratus) -- basic_machine=i860-stratus -- os=-sysv4 -- ;; -- sun2) -- basic_machine=m68000-sun -- ;; -- sun2os3) -- basic_machine=m68000-sun -- os=-sunos3 -- ;; -- sun2os4) -- basic_machine=m68000-sun -- os=-sunos4 -- ;; -- sun3os3) -- basic_machine=m68k-sun -- os=-sunos3 -- ;; -- sun3os4) -- basic_machine=m68k-sun -- os=-sunos4 -- ;; -- sun4os3) -- basic_machine=sparc-sun -- os=-sunos3 -- ;; -- sun4os4) -- basic_machine=sparc-sun -- os=-sunos4 -- ;; -- sun4sol2) -- basic_machine=sparc-sun -- os=-solaris2 -- ;; -- sun3 | sun3-*) -- basic_machine=m68k-sun -- ;; -- sun4) -- basic_machine=sparc-sun -- ;; -- sun386 | sun386i | roadrunner) -- basic_machine=i386-sun -- ;; -- sv1) -- basic_machine=sv1-cray -- os=-unicos -- ;; -- symmetry) -- basic_machine=i386-sequent -- os=-dynix -- ;; -- t3e) -- basic_machine=alphaev5-cray -- os=-unicos -- ;; -- t90) -- basic_machine=t90-cray -- os=-unicos -- ;; -- tic54x | c54x*) -- basic_machine=tic54x-unknown -- os=-coff -- ;; -- tic55x | c55x*) -- basic_machine=tic55x-unknown -- os=-coff -- ;; -- tic6x | c6x*) -- basic_machine=tic6x-unknown -- os=-coff -- ;; -- # This must be matched before tile*. -- tilegx*) -- basic_machine=tilegx-unknown -- os=-linux-gnu -- ;; -- tile*) -- basic_machine=tile-unknown -- os=-linux-gnu -- ;; -- tx39) -- basic_machine=mipstx39-unknown -- ;; -- tx39el) -- basic_machine=mipstx39el-unknown -- ;; -- toad1) -- basic_machine=pdp10-xkl -- os=-tops20 -- ;; -- tower | tower-32) -- basic_machine=m68k-ncr -- ;; -- tpf) -- basic_machine=s390x-ibm -- os=-tpf -- ;; -- udi29k) -- basic_machine=a29k-amd -- os=-udi -- ;; -- ultra3) -- basic_machine=a29k-nyu -- os=-sym1 -- ;; -- v810 | necv810) -- basic_machine=v810-nec -- os=-none -- ;; -- vaxv) -- basic_machine=vax-dec -- os=-sysv -- ;; -- vms) -- basic_machine=vax-dec -- os=-vms -- ;; -- vpp*|vx|vx-*) -- basic_machine=f301-fujitsu -- ;; -- vxworks960) -- basic_machine=i960-wrs -- os=-vxworks -- ;; -- vxworks68) -- basic_machine=m68k-wrs -- os=-vxworks -- ;; -- vxworks29k) -- basic_machine=a29k-wrs -- os=-vxworks -- ;; -- w65*) -- basic_machine=w65-wdc -- os=-none -- ;; -- w89k-*) -- basic_machine=hppa1.1-winbond -- os=-proelf -- ;; -- xbox) -- basic_machine=i686-pc -- os=-mingw32 -- ;; -- xps | xps100) -- basic_machine=xps100-honeywell -- ;; -- ymp) -- basic_machine=ymp-cray -- os=-unicos -- ;; -- z8k-*-coff) -- basic_machine=z8k-unknown -- os=-sim -- ;; -- z80-*-coff) -- basic_machine=z80-unknown -- os=-sim -- ;; -- none) -- basic_machine=none-none -- os=-none -- ;; -- --# Here we handle the default manufacturer of certain CPU types. It is in --# some cases the only manufacturer, in others, it is the most popular. 
-- w89k) -- basic_machine=hppa1.1-winbond -- ;; -- op50n) -- basic_machine=hppa1.1-oki -- ;; -- op60c) -- basic_machine=hppa1.1-oki -- ;; -- romp) -- basic_machine=romp-ibm -- ;; -- mmix) -- basic_machine=mmix-knuth -- ;; -- rs6000) -- basic_machine=rs6000-ibm -- ;; -- vax) -- basic_machine=vax-dec -- ;; -- pdp10) -- # there are many clones, so DEC is not a safe bet -- basic_machine=pdp10-unknown -- ;; -- pdp11) -- basic_machine=pdp11-dec -- ;; -- we32k) -- basic_machine=we32k-att -- ;; -- sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) -- basic_machine=sh-unknown -- ;; -- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) -- basic_machine=sparc-sun -- ;; -- cydra) -- basic_machine=cydra-cydrome -- ;; -- orion) -- basic_machine=orion-highlevel -- ;; -- orion105) -- basic_machine=clipper-highlevel -- ;; -- mac | mpw | mac-mpw) -- basic_machine=m68k-apple -- ;; -- pmac | pmac-mpw) -- basic_machine=powerpc-apple -- ;; -- *-unknown) -- # Make sure to match an already-canonicalized machine name. -- ;; -- *) -- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 -- exit 1 -- ;; --esac -- --# Here we canonicalize certain aliases for manufacturers. --case $basic_machine in -- *-digital*) -- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` -- ;; -- *-commodore*) -- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` -- ;; -- *) -- ;; --esac -- --# Decode manufacturer-specific aliases for certain operating systems. -- --if [ x"$os" != x"" ] --then --case $os in -- # First match some system type aliases -- # that might get confused with valid system types. -- # -solaris* is a basic system type, with this one exception. -- -auroraux) -- os=-auroraux -- ;; -- -solaris1 | -solaris1.*) -- os=`echo $os | sed -e 's|solaris1|sunos4|'` -- ;; -- -solaris) -- os=-solaris2 -- ;; -- -svr4*) -- os=-sysv4 -- ;; -- -unixware*) -- os=-sysv4.2uw -- ;; -- -gnu/linux*) -- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` -- ;; -- # First accept the basic system types. -- # The portable systems comes first. -- # Each alternative MUST END IN A *, to match a version number. -- # -sysv* is not here because it comes later, after sysvr4. 
-- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ -- | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ -- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ -- | -sym* | -kopensolaris* \ -- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ -- | -aos* | -aros* \ -- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ -- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ -- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ -- | -openbsd* | -solidbsd* \ -- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ -- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ -- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ -- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ -- | -chorusos* | -chorusrdb* | -cegcc* \ -- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ -- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \ -- | -uxpv* | -beos* | -mpeix* | -udk* \ -- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ -- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ -- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ -- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ -- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ -- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ -- | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) -- # Remember, each alternative MUST END IN *, to match a version number. -- ;; -- -qnx*) -- case $basic_machine in -- x86-* | i*86-*) -- ;; -- *) -- os=-nto$os -- ;; -- esac -- ;; -- -nto-qnx*) -- ;; -- -nto*) -- os=`echo $os | sed -e 's|nto|nto-qnx|'` -- ;; -- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ -- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ -- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) -- ;; -- -mac*) -- os=`echo $os | sed -e 's|mac|macos|'` -- ;; -- -linux-dietlibc) -- os=-linux-dietlibc -- ;; -- -linux*) -- os=`echo $os | sed -e 's|linux|linux-gnu|'` -- ;; -- -sunos5*) -- os=`echo $os | sed -e 's|sunos5|solaris2|'` -- ;; -- -sunos6*) -- os=`echo $os | sed -e 's|sunos6|solaris3|'` -- ;; -- -opened*) -- os=-openedition -- ;; -- -os400*) -- os=-os400 -- ;; -- -wince*) -- os=-wince -- ;; -- -osfrose*) -- os=-osfrose -- ;; -- -osf*) -- os=-osf -- ;; -- -utek*) -- os=-bsd -- ;; -- -dynix*) -- os=-bsd -- ;; -- -acis*) -- os=-aos -- ;; -- -atheos*) -- os=-atheos -- ;; -- -syllable*) -- os=-syllable -- ;; -- -386bsd) -- os=-bsd -- ;; -- -ctix* | -uts*) -- os=-sysv -- ;; -- -nova*) -- os=-rtmk-nova -- ;; -- -ns2 ) -- os=-nextstep2 -- ;; -- -nsk*) -- os=-nsk -- ;; -- # Preserve the version number of sinix5. -- -sinix5.*) -- os=`echo $os | sed -e 's|sinix|sysv|'` -- ;; -- -sinix*) -- os=-sysv4 -- ;; -- -tpf*) -- os=-tpf -- ;; -- -triton*) -- os=-sysv3 -- ;; -- -oss*) -- os=-sysv3 -- ;; -- -svr4) -- os=-sysv4 -- ;; -- -svr3) -- os=-sysv3 -- ;; -- -sysvr4) -- os=-sysv4 -- ;; -- # This must come after -sysvr4. -- -sysv*) -- ;; -- -ose*) -- os=-ose -- ;; -- -es1800*) -- os=-ose -- ;; -- -xenix) -- os=-xenix -- ;; -- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) -- os=-mint -- ;; -- -aros*) -- os=-aros -- ;; -- -kaos*) -- os=-kaos -- ;; -- -zvmoe) -- os=-zvmoe -- ;; -- -dicos*) -- os=-dicos -- ;; -- -nacl*) -- ;; -- -none) -- ;; -- *) -- # Get rid of the `-' at the beginning of $os. 
-- os=`echo $os | sed 's/[^-]*-//'` -- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 -- exit 1 -- ;; --esac --else -- --# Here we handle the default operating systems that come with various machines. --# The value should be what the vendor currently ships out the door with their --# machine or put another way, the most popular os provided with the machine. -- --# Note that if you're going to try to match "-MANUFACTURER" here (say, --# "-sun"), then you have to tell the case statement up towards the top --# that MANUFACTURER isn't an operating system. Otherwise, code above --# will signal an error saying that MANUFACTURER isn't an operating --# system, and we'll never get to this point. -- --case $basic_machine in -- score-*) -- os=-elf -- ;; -- spu-*) -- os=-elf -- ;; -- *-acorn) -- os=-riscix1.2 -- ;; -- arm*-rebel) -- os=-linux -- ;; -- arm*-semi) -- os=-aout -- ;; -- c4x-* | tic4x-*) -- os=-coff -- ;; -- # This must come before the *-dec entry. -- pdp10-*) -- os=-tops20 -- ;; -- pdp11-*) -- os=-none -- ;; -- *-dec | vax-*) -- os=-ultrix4.2 -- ;; -- m68*-apollo) -- os=-domain -- ;; -- i386-sun) -- os=-sunos4.0.2 -- ;; -- m68000-sun) -- os=-sunos3 -- # This also exists in the configure program, but was not the -- # default. -- # os=-sunos4 -- ;; -- m68*-cisco) -- os=-aout -- ;; -- mep-*) -- os=-elf -- ;; -- mips*-cisco) -- os=-elf -- ;; -- mips*-*) -- os=-elf -- ;; -- or32-*) -- os=-coff -- ;; -- *-tti) # must be before sparc entry or we get the wrong os. -- os=-sysv3 -- ;; -- sparc-* | *-sun) -- os=-sunos4.1.1 -- ;; -- *-be) -- os=-beos -- ;; -- *-haiku) -- os=-haiku -- ;; -- *-ibm) -- os=-aix -- ;; -- *-knuth) -- os=-mmixware -- ;; -- *-wec) -- os=-proelf -- ;; -- *-winbond) -- os=-proelf -- ;; -- *-oki) -- os=-proelf -- ;; -- *-hp) -- os=-hpux -- ;; -- *-hitachi) -- os=-hiux -- ;; -- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) -- os=-sysv -- ;; -- *-cbm) -- os=-amigaos -- ;; -- *-dg) -- os=-dgux -- ;; -- *-dolphin) -- os=-sysv3 -- ;; -- m68k-ccur) -- os=-rtu -- ;; -- m88k-omron*) -- os=-luna -- ;; -- *-next ) -- os=-nextstep -- ;; -- *-sequent) -- os=-ptx -- ;; -- *-crds) -- os=-unos -- ;; -- *-ns) -- os=-genix -- ;; -- i370-*) -- os=-mvs -- ;; -- *-next) -- os=-nextstep3 -- ;; -- *-gould) -- os=-sysv -- ;; -- *-highlevel) -- os=-bsd -- ;; -- *-encore) -- os=-bsd -- ;; -- *-sgi) -- os=-irix -- ;; -- *-siemens) -- os=-sysv4 -- ;; -- *-masscomp) -- os=-rtu -- ;; -- f30[01]-fujitsu | f700-fujitsu) -- os=-uxpv -- ;; -- *-rom68k) -- os=-coff -- ;; -- *-*bug) -- os=-coff -- ;; -- *-apple) -- os=-macos -- ;; -- *-atari*) -- os=-mint -- ;; -- *) -- os=-none -- ;; --esac --fi -- --# Here we handle the case where we know the os, and the CPU type, but not the --# manufacturer. We pick the logical manufacturer. 
--vendor=unknown --case $basic_machine in -- *-unknown) -- case $os in -- -riscix*) -- vendor=acorn -- ;; -- -sunos*) -- vendor=sun -- ;; -- -cnk*|-aix*) -- vendor=ibm -- ;; -- -beos*) -- vendor=be -- ;; -- -hpux*) -- vendor=hp -- ;; -- -mpeix*) -- vendor=hp -- ;; -- -hiux*) -- vendor=hitachi -- ;; -- -unos*) -- vendor=crds -- ;; -- -dgux*) -- vendor=dg -- ;; -- -luna*) -- vendor=omron -- ;; -- -genix*) -- vendor=ns -- ;; -- -mvs* | -opened*) -- vendor=ibm -- ;; -- -os400*) -- vendor=ibm -- ;; -- -ptx*) -- vendor=sequent -- ;; -- -tpf*) -- vendor=ibm -- ;; -- -vxsim* | -vxworks* | -windiss*) -- vendor=wrs -- ;; -- -aux*) -- vendor=apple -- ;; -- -hms*) -- vendor=hitachi -- ;; -- -mpw* | -macos*) -- vendor=apple -- ;; -- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) -- vendor=atari -- ;; -- -vos*) -- vendor=stratus -- ;; -- esac -- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` -- ;; --esac -- --echo $basic_machine$os --exit -- --# Local variables: --# eval: (add-hook 'write-file-hooks 'time-stamp) --# time-stamp-start: "timestamp='" --# time-stamp-format: "%:y-%02m-%02d" --# time-stamp-end: "'" --# End: ---- a/libraries/libapparmor/doc/Makefile.am -+++ b/libraries/libapparmor/doc/Makefile.am -@@ -11,11 +11,9 @@ EXTRA_DIST = $(man_MANS) $(PODS) - ## delete man pages at maintainer-clean - BUILT_SOURCES = $(man_MANS) - --%.2: %.pod -- $(POD2MAN) \ -- --section=2 \ -- --release="NOVELL/SUSE" \ -- --center="AppArmor" \ -- --date="2007-07-27" \ -- $< > $@ --$ -+PODARGS = --center=AppArmor --release=NOVELL/SUSE -+ -+pod2man = pod2man $(PODARGS) --section $(subst .,,$(suffix $<)) $< > $@ -+ -+.pod.2: -+ $(pod2man) ---- a/libraries/libapparmor/install-sh -+++ /dev/null -@@ -1,520 +0,0 @@ --#!/bin/sh --# install - install a program, script, or datafile -- --scriptversion=2009-04-28.21; # UTC -- --# This originates from X11R5 (mit/util/scripts/install.sh), which was --# later released in X11R6 (xc/config/util/install.sh) with the --# following copyright and license. --# --# Copyright (C) 1994 X Consortium --# --# Permission is hereby granted, free of charge, to any person obtaining a copy --# of this software and associated documentation files (the "Software"), to --# deal in the Software without restriction, including without limitation the --# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or --# sell copies of the Software, and to permit persons to whom the Software is --# furnished to do so, subject to the following conditions: --# --# The above copyright notice and this permission notice shall be included in --# all copies or substantial portions of the Software. --# --# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR --# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, --# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE --# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN --# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- --# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --# --# Except as contained in this notice, the name of the X Consortium shall not --# be used in advertising or otherwise to promote the sale, use or other deal- --# ings in this Software without prior written authorization from the X Consor- --# tium. --# --# --# FSF changes to this file are in the public domain. 
--# --# Calling this script install-sh is preferred over install.sh, to prevent --# `make' implicit rules from creating a file called install from it --# when there is no Makefile. --# --# This script is compatible with the BSD install script, but was written --# from scratch. -- --nl=' --' --IFS=" "" $nl" -- --# set DOITPROG to echo to test this script -- --# Don't use :- since 4.3BSD and earlier shells don't like it. --doit=${DOITPROG-} --if test -z "$doit"; then -- doit_exec=exec --else -- doit_exec=$doit --fi -- --# Put in absolute file names if you don't have them in your path; --# or use environment vars. -- --chgrpprog=${CHGRPPROG-chgrp} --chmodprog=${CHMODPROG-chmod} --chownprog=${CHOWNPROG-chown} --cmpprog=${CMPPROG-cmp} --cpprog=${CPPROG-cp} --mkdirprog=${MKDIRPROG-mkdir} --mvprog=${MVPROG-mv} --rmprog=${RMPROG-rm} --stripprog=${STRIPPROG-strip} -- --posix_glob='?' --initialize_posix_glob=' -- test "$posix_glob" != "?" || { -- if (set -f) 2>/dev/null; then -- posix_glob= -- else -- posix_glob=: -- fi -- } --' -- --posix_mkdir= -- --# Desired mode of installed file. --mode=0755 -- --chgrpcmd= --chmodcmd=$chmodprog --chowncmd= --mvcmd=$mvprog --rmcmd="$rmprog -f" --stripcmd= -- --src= --dst= --dir_arg= --dst_arg= -- --copy_on_change=false --no_target_directory= -- --usage="\ --Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE -- or: $0 [OPTION]... SRCFILES... DIRECTORY -- or: $0 [OPTION]... -t DIRECTORY SRCFILES... -- or: $0 [OPTION]... -d DIRECTORIES... -- --In the 1st form, copy SRCFILE to DSTFILE. --In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. --In the 4th, create DIRECTORIES. -- --Options: -- --help display this help and exit. -- --version display version info and exit. -- -- -c (ignored) -- -C install only if different (preserve the last data modification time) -- -d create directories instead of installing files. -- -g GROUP $chgrpprog installed files to GROUP. -- -m MODE $chmodprog installed files to MODE. -- -o USER $chownprog installed files to USER. -- -s $stripprog installed files. -- -t DIRECTORY install into DIRECTORY. -- -T report an error if DSTFILE is a directory. -- --Environment variables override the default commands: -- CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG -- RMPROG STRIPPROG --" -- --while test $# -ne 0; do -- case $1 in -- -c) ;; -- -- -C) copy_on_change=true;; -- -- -d) dir_arg=true;; -- -- -g) chgrpcmd="$chgrpprog $2" -- shift;; -- -- --help) echo "$usage"; exit $?;; -- -- -m) mode=$2 -- case $mode in -- *' '* | *' '* | *' --'* | *'*'* | *'?'* | *'['*) -- echo "$0: invalid mode: $mode" >&2 -- exit 1;; -- esac -- shift;; -- -- -o) chowncmd="$chownprog $2" -- shift;; -- -- -s) stripcmd=$stripprog;; -- -- -t) dst_arg=$2 -- shift;; -- -- -T) no_target_directory=true;; -- -- --version) echo "$0 $scriptversion"; exit $?;; -- -- --) shift -- break;; -- -- -*) echo "$0: invalid option: $1" >&2 -- exit 1;; -- -- *) break;; -- esac -- shift --done -- --if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then -- # When -d is used, all remaining arguments are directories to create. -- # When -t is used, the destination is already specified. -- # Otherwise, the last argument is the destination. Remove it from $@. -- for arg -- do -- if test -n "$dst_arg"; then -- # $@ is not empty: it contains at least $arg. -- set fnord "$@" "$dst_arg" -- shift # fnord -- fi -- shift # arg -- dst_arg=$arg -- done --fi -- --if test $# -eq 0; then -- if test -z "$dir_arg"; then -- echo "$0: no input file specified." 
>&2 -- exit 1 -- fi -- # It's OK to call `install-sh -d' without argument. -- # This can happen when creating conditional directories. -- exit 0 --fi -- --if test -z "$dir_arg"; then -- trap '(exit $?); exit' 1 2 13 15 -- -- # Set umask so as not to create temps with too-generous modes. -- # However, 'strip' requires both read and write access to temps. -- case $mode in -- # Optimize common cases. -- *644) cp_umask=133;; -- *755) cp_umask=22;; -- -- *[0-7]) -- if test -z "$stripcmd"; then -- u_plus_rw= -- else -- u_plus_rw='% 200' -- fi -- cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; -- *) -- if test -z "$stripcmd"; then -- u_plus_rw= -- else -- u_plus_rw=,u+rw -- fi -- cp_umask=$mode$u_plus_rw;; -- esac --fi -- --for src --do -- # Protect names starting with `-'. -- case $src in -- -*) src=./$src;; -- esac -- -- if test -n "$dir_arg"; then -- dst=$src -- dstdir=$dst -- test -d "$dstdir" -- dstdir_status=$? -- else -- -- # Waiting for this to be detected by the "$cpprog $src $dsttmp" command -- # might cause directories to be created, which would be especially bad -- # if $src (and thus $dsttmp) contains '*'. -- if test ! -f "$src" && test ! -d "$src"; then -- echo "$0: $src does not exist." >&2 -- exit 1 -- fi -- -- if test -z "$dst_arg"; then -- echo "$0: no destination specified." >&2 -- exit 1 -- fi -- -- dst=$dst_arg -- # Protect names starting with `-'. -- case $dst in -- -*) dst=./$dst;; -- esac -- -- # If destination is a directory, append the input filename; won't work -- # if double slashes aren't ignored. -- if test -d "$dst"; then -- if test -n "$no_target_directory"; then -- echo "$0: $dst_arg: Is a directory" >&2 -- exit 1 -- fi -- dstdir=$dst -- dst=$dstdir/`basename "$src"` -- dstdir_status=0 -- else -- # Prefer dirname, but fall back on a substitute if dirname fails. -- dstdir=` -- (dirname "$dst") 2>/dev/null || -- expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ -- X"$dst" : 'X\(//\)[^/]' \| \ -- X"$dst" : 'X\(//\)$' \| \ -- X"$dst" : 'X\(/\)' \| . 2>/dev/null || -- echo X"$dst" | -- sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ -- s//\1/ -- q -- } -- /^X\(\/\/\)[^/].*/{ -- s//\1/ -- q -- } -- /^X\(\/\/\)$/{ -- s//\1/ -- q -- } -- /^X\(\/\).*/{ -- s//\1/ -- q -- } -- s/.*/./; q' -- ` -- -- test -d "$dstdir" -- dstdir_status=$? -- fi -- fi -- -- obsolete_mkdir_used=false -- -- if test $dstdir_status != 0; then -- case $posix_mkdir in -- '') -- # Create intermediate dirs using mode 755 as modified by the umask. -- # This is like FreeBSD 'install' as of 1997-10-28. -- umask=`umask` -- case $stripcmd.$umask in -- # Optimize common cases. -- *[2367][2367]) mkdir_umask=$umask;; -- .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; -- -- *[0-7]) -- mkdir_umask=`expr $umask + 22 \ -- - $umask % 100 % 40 + $umask % 20 \ -- - $umask % 10 % 4 + $umask % 2 -- `;; -- *) mkdir_umask=$umask,go-w;; -- esac -- -- # With -d, create the new directory with the user-specified mode. -- # Otherwise, rely on $mkdir_umask. -- if test -n "$dir_arg"; then -- mkdir_mode=-m$mode -- else -- mkdir_mode= -- fi -- -- posix_mkdir=false -- case $umask in -- *[123567][0-7][0-7]) -- # POSIX mkdir -p sets u+wx bits regardless of umask, which -- # is incompatible with FreeBSD 'install' when (umask & 300) != 0. 
-- ;; -- *) -- tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ -- trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 -- -- if (umask $mkdir_umask && -- exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 -- then -- if test -z "$dir_arg" || { -- # Check for POSIX incompatibilities with -m. -- # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or -- # other-writeable bit of parent directory when it shouldn't. -- # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. -- ls_ld_tmpdir=`ls -ld "$tmpdir"` -- case $ls_ld_tmpdir in -- d????-?r-*) different_mode=700;; -- d????-?--*) different_mode=755;; -- *) false;; -- esac && -- $mkdirprog -m$different_mode -p -- "$tmpdir" && { -- ls_ld_tmpdir_1=`ls -ld "$tmpdir"` -- test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" -- } -- } -- then posix_mkdir=: -- fi -- rmdir "$tmpdir/d" "$tmpdir" -- else -- # Remove any dirs left behind by ancient mkdir implementations. -- rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null -- fi -- trap '' 0;; -- esac;; -- esac -- -- if -- $posix_mkdir && ( -- umask $mkdir_umask && -- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" -- ) -- then : -- else -- -- # The umask is ridiculous, or mkdir does not conform to POSIX, -- # or it failed possibly due to a race condition. Create the -- # directory the slow way, step by step, checking for races as we go. -- -- case $dstdir in -- /*) prefix='/';; -- -*) prefix='./';; -- *) prefix='';; -- esac -- -- eval "$initialize_posix_glob" -- -- oIFS=$IFS -- IFS=/ -- $posix_glob set -f -- set fnord $dstdir -- shift -- $posix_glob set +f -- IFS=$oIFS -- -- prefixes= -- -- for d -- do -- test -z "$d" && continue -- -- prefix=$prefix$d -- if test -d "$prefix"; then -- prefixes= -- else -- if $posix_mkdir; then -- (umask=$mkdir_umask && -- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break -- # Don't fail if two instances are running concurrently. -- test -d "$prefix" || exit 1 -- else -- case $prefix in -- *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; -- *) qprefix=$prefix;; -- esac -- prefixes="$prefixes '$qprefix'" -- fi -- fi -- prefix=$prefix/ -- done -- -- if test -n "$prefixes"; then -- # Don't fail if two instances are running concurrently. -- (umask $mkdir_umask && -- eval "\$doit_exec \$mkdirprog $prefixes") || -- test -d "$dstdir" || exit 1 -- obsolete_mkdir_used=true -- fi -- fi -- fi -- -- if test -n "$dir_arg"; then -- { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && -- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && -- { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || -- test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 -- else -- -- # Make a couple of temp file names in the proper directory. -- dsttmp=$dstdir/_inst.$$_ -- rmtmp=$dstdir/_rm.$$_ -- -- # Trap to clean up those temp files at exit. -- trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 -- -- # Copy the file name to the temp name. -- (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && -- -- # and set any options; do chmod last to preserve setuid bits. -- # -- # If any of these fail, we abort the whole thing. If we want to -- # ignore errors from any of these, just make sure not to ignore -- # errors from the above "$doit $cpprog $src $dsttmp" command. 
-- # -- { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && -- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && -- { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && -- { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && -- -- # If -C, don't bother to copy if it wouldn't change the file. -- if $copy_on_change && -- old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && -- new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && -- -- eval "$initialize_posix_glob" && -- $posix_glob set -f && -- set X $old && old=:$2:$4:$5:$6 && -- set X $new && new=:$2:$4:$5:$6 && -- $posix_glob set +f && -- -- test "$old" = "$new" && -- $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 -- then -- rm -f "$dsttmp" -- else -- # Rename the file to the real destination. -- $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || -- -- # The rename failed, perhaps because mv can't rename something else -- # to itself, or perhaps because mv is so ancient that it does not -- # support -f. -- { -- # Now remove or move aside any old file at destination location. -- # We try this two ways since rm can't unlink itself on some -- # systems and the destination file might be busy for other -- # reasons. In this case, the final cleanup might fail but the new -- # file should still install successfully. -- { -- test ! -f "$dst" || -- $doit $rmcmd -f "$dst" 2>/dev/null || -- { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && -- { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } -- } || -- { echo "$0: cannot unlink or rename $dst" >&2 -- (exit 1); exit 1 -- } -- } && -- -- # Now rename the file to the real destination. -- $doit $mvcmd "$dsttmp" "$dst" -- } -- fi || exit 1 -- -- trap '' 0 -- fi --done -- --# Local variables: --# eval: (add-hook 'write-file-hooks 'time-stamp) --# time-stamp-start: "scriptversion=" --# time-stamp-format: "%:y-%02m-%02d.%02H" --# time-stamp-time-zone: "UTC" --# time-stamp-end: "; # UTC" --# End: ---- a/libraries/libapparmor/libapparmor1.spec -+++ /dev/null -@@ -1,178 +0,0 @@ --# --# spec file for package libapparmor --# --# norootforbuild --%define _unpackaged_files_terminate_build 0 -- --Name: libapparmor1 --Version: 2.5 --Release: 3.20070916 --License: LGPL --Group: Development/Libraries/C and C++ --BuildRoot: %{_tmppath}/%{name}-%{version}-build --Source0: %{name}-%{version}.tar.bz2 --BuildRequires: swig gcc perl --Provides: libapparmor --Provides: libimmunix --Obsoletes: libapparmor --Obsoletes: libimmunix --Summary: A utility library for AppArmor -- --%define aalibversion 1.0.2 -- --%description --- -- --%package -n libapparmor-devel --Requires: %{name} = %{version}-%{release} --Group: Development/Libraries/C and C++ --Provides: libapparmor:/usr/include/sys/apparmor.h --Summary: - -- --%description -n libapparmor-devel --- -- --%post -n libapparmor-devel --/sbin/ldconfig -- --%postun -n libapparmor-devel --/sbin/ldconfig -- --%package -n perl-libapparmor --Requires: %{name} = %{version} --Requires: perl = %{perl_version} --Group: Development/Libraries/Perl --Summary: - -- --%description -n perl-libapparmor --- -- --%prep --%setup -q -- --%build --./configure --prefix=%{_prefix} --libdir=%{_libdir} --with-perl --make CFLAGS="${RPM_OPT_FLAGS}" -- --%install --make install DESTDIR="$RPM_BUILD_ROOT" --mkdir ${RPM_BUILD_ROOT}/%{_lib} --# this is really hacky --rm ${RPM_BUILD_ROOT}/%{_libdir}/libapparmor.so --rm ${RPM_BUILD_ROOT}/%{_libdir}/libimmunix.so --cp ${RPM_BUILD_ROOT}/%{_libdir}/libapparmor.so.%{aalibversion} ${RPM_BUILD_ROOT}/%{_lib} --cp ${RPM_BUILD_ROOT}/%{_libdir}/libimmunix.so.%{aalibversion} 
${RPM_BUILD_ROOT}/%{_lib} --ln -s /%{_lib}/libapparmor.so.%{aalibversion} ${RPM_BUILD_ROOT}/%{_libdir}/libapparmor.so -- --find $RPM_BUILD_ROOT -name .packlist -exec rm -f {} \; --find $RPM_BUILD_ROOT -name perllocal.pod -exec rm -f {} \; -- --# create symlink for old change_hat(2) manpage --ln -s aa_change_hat.2 ${RPM_BUILD_ROOT}/%{_mandir}/man2/change_hat.2 --%clean --rm -rf "$RPM_BUILD_ROOT" -- --%post --/sbin/ldconfig -- --%postun --/sbin/ldconfig -- --%files --%defattr(-,root,root) --/%{_lib}/libapparmor.so.* --/%{_lib}/libimmunix.so.* -- --%files -n libapparmor-devel --%defattr(-,root,root) --%{_libdir}/libapparmor.so --%{_libdir}/libapparmor.la --%{_libdir}/libapparmor.a --%{_libdir}/libimmunix.la --%{_libdir}/libimmunix.a --%doc %{_mandir}/man*/* --%dir %{_includedir}/aalogparse --%{_includedir}/sys/apparmor.h --%{_includedir}/aalogparse/* -- --%files -n perl-libapparmor --%defattr(-,root,root) --%dir %{perl_vendorarch}/auto/LibAppArmor --%{perl_vendorarch}/auto/LibAppArmor/* --%{perl_vendorarch}/LibAppArmor.pm -- --%changelog --* Sun Sep 16 2007 - sbeattie@suse.de --- aalogparse: add support for type=15xx audit field --- aalogparse: add support for audit messages thru syslog --- aalogparse: reduce noise to stdout on syntax errors --- aalogparse: add support for more missing message types --- aalogparse: parse messages w/safe (hex) string encodings --* Fri Aug 17 2007 - sbeattie@suse.de --- Fix broken symlink for old change_hat(2) manpage --* Wed Aug 15 2007 - sbeattie@suse.de --- fix braindead symbol versioning issue with old version name --- re-enable CFLAGS=RPM_OPT_FLAGS for build --- convert change_hat(2) to aa_change_hat(2) --- use 64bit magic token --- add aa_change_profile(2) interface --* Sat Jul 28 2007 - mbarringer@suse.de --- Merged in libaalogparse to the library/package --* Tue Apr 7 2007 - sbeattie@suse.de --- Add change_hat manpage to package --* Thu Jan 18 2007 - sbeattie@suse.de --- Add a clean stage to remove buildroot to specfile --* Fri Feb 17 2006 Seth Arnold 2.0-4.1 --- use gettid() instead of /proc/self --* Fri Feb 10 2006 Steve Beattie 2.0-3.2 --- Use RPM_OPT_FLAGS --- Fix installed library version to match specfile version --* Wed Feb 1 2006 Steve Beattie 2.0-3.1 --- Fix prototype to match change_hat(2) manpage --* Mon Jan 23 2006 Steve Beattie 2.0-3 --- Rename to libapparmor.so and apparmor.h --* Thu Jan 5 2006 Steve Beattie 2.0-2 --- Add svn repo number to tarball --* Wed Dec 7 2005 Steve Beattie 2.0-1 --- Reset version for inclusion is SUSE autobuild --* Wed Dec 7 2005 Steve Beattie 1.99-8 --- Disable 32bit builds on 64bit platforms for now --* Mon Dec 5 2005 Steve Beattie 1.99-7 --- Rename package to libapparmor --* Wed Aug 10 2005 Steve Beattie 1.99-6_imnx --- Cleanup some of the deprecated exported symbols --* Thu Aug 4 2005 John Johansen 1.99-5_imnx --- and -m31 flag for s390 --* Mon Jul 11 2005 Steve Beattie 1.99-4_imnx --- get rid of libimmunix_post_upgrade --- Re-license to LGPL --- update description --* Fri May 27 2005 Steve Beattie 1.99-3_imnx --- Clear token buffer before freeing. --- Error handling cleanup. --* Fri Feb 18 2005 Steve Beattie 1.99-2_imnx --- Use the right command for the 32bit env on 64bit platforms --- Support for 64bit builds on systems with combined 32/64 support --* Fri Feb 4 2005 Seth Arnold 1.99-1_imnx --- Reversion to 1.99 --* Mon Nov 8 2004 Steve Beattie 1.2-3_imnx --- Finish conversion to slack-capable infrastructure. 
--* Thu Oct 28 2004 Steve Beattie 1.2-2_imnx --- Added a 'make install' target for prelim slack support --* Tue Oct 12 2004 Steve Beattie 1.2-1_imnx --- Bump version after shass-1.1 branched off --* Thu Sep 23 2004 Steve Beattie 1.0-13_imnx --- Vastly simplify the string handling in change_hat(). --* Thu Sep 9 2004 Steve Beattie 1.0-12_imnx --- Conditionalize group the package shows up in. --* Thu Sep 9 2004 Steve Beattie 1.0-11_imnx --- Fix so change_hat functions correctly even when the token is zero. --* Thu Sep 2 2004 Steve Beattie 1.0-10_imnx --- Added that it provides %{_prefix}/sbin/libimmunix_post_upgrade, this -- was somehow breaking yast. --* Mon Aug 30 2004 Steve Beattie 1.0-9_imnx --- Copyright cleanups. --* Wed Jul 21 2004 Steve Beattie 1.0-8_imnx --- add basis for conditional distro support --* Thu May 28 2004 Tony Jones 1.0-7_imnx --- Add "changehat" command word to start of string written to /proc/pid/attr ---- a/libraries/libapparmor/m4/ac_pod2man.m4 -+++ /dev/null -@@ -1,16 +0,0 @@ --AC_DEFUN([PROG_POD2MAN],[ -- AC_CHECK_PROG(POD2MAN,pod2man,pod2man,no) -- if test "$POD2MAN" = "no"; then -- AC_MSG_ERROR([ --The pod2man program was not found in the default path. pod2man is part of --Perl, which can be retrieved from: -- -- http://www.perl.com/ -- --The latest version at this time is 5.6.1; it is available packaged as the --following archive: -- -- http://www.perl.com/CPAN/src/stable.tar.gz --]) -- fi --]) ---- a/libraries/libapparmor/m4/ac_python_devel.m4 -+++ /dev/null -@@ -1,193 +0,0 @@ --AC_DEFUN([AC_PYTHON_DEVEL],[ -- # -- # Allow the use of a (user set) custom python version -- # -- AC_ARG_VAR([PYTHON_VERSION],[The installed Python -- version to use, for example '2.3'. This string -- will be appended to the Python interpreter -- canonical name.]) -- -- AC_PATH_PROG([PYTHON],[python[$PYTHON_VERSION]]) -- if test -z "$PYTHON"; then -- AC_MSG_ERROR([Cannot find python$PYTHON_VERSION in your system path]) -- PYTHON_VERSION="" -- fi -- -- # -- # Check for a version of Python >= 2.1.0 -- # -- AC_MSG_CHECKING([for a version of Python >= '2.1.0']) -- ac_supports_python_ver=`$PYTHON -c "import sys, string; \ -- ver = string.split(sys.version)[[0]]; \ -- print ver >= '2.1.0'"` -- if test "$ac_supports_python_ver" != "True"; then -- if test -z "$PYTHON_NOVERSIONCHECK"; then -- AC_MSG_RESULT([no]) -- AC_MSG_FAILURE([ --This version of the AC@&t@_PYTHON_DEVEL macro --doesn't work properly with versions of Python before --2.1.0. You may need to re-run configure, setting the --variables PYTHON_CPPFLAGS, PYTHON_LDFLAGS, PYTHON_SITE_PKG, --PYTHON_EXTRA_LIBS and PYTHON_EXTRA_LDFLAGS by hand. --Moreover, to disable this check, set PYTHON_NOVERSIONCHECK --to something else than an empty string. --]) -- else -- AC_MSG_RESULT([skip at user request]) -- fi -- else -- AC_MSG_RESULT([yes]) -- fi -- -- # -- # if the macro parameter ``version'' is set, honour it -- # -- if test -n "$1"; then -- AC_MSG_CHECKING([for a version of Python $1]) -- ac_supports_python_ver=`$PYTHON -c "import sys, string; \ -- ver = string.split(sys.version)[[0]]; \ -- print ver $1"` -- if test "$ac_supports_python_ver" = "True"; then -- AC_MSG_RESULT([yes]) -- else -- AC_MSG_RESULT([no]) -- AC_MSG_ERROR([this package requires Python $1. --If you have it installed, but it isn't the default Python --interpreter in your system path, please pass the PYTHON_VERSION --variable to configure. See ``configure --help'' for reference. 
--]) -- PYTHON_VERSION="" -- fi -- fi -- -- # -- # Check if you have distutils, else fail -- # -- AC_MSG_CHECKING([for the distutils Python package]) -- ac_distutils_result=`$PYTHON -c "import distutils" 2>&1` -- if test -z "$ac_distutils_result"; then -- AC_MSG_RESULT([yes]) -- else -- AC_MSG_RESULT([no]) -- AC_MSG_ERROR([cannot import Python module "distutils". --Please check your Python installation. The error was: --$ac_distutils_result]) -- PYTHON_VERSION="" -- fi -- -- # -- # Check for Python include path -- # -- AC_MSG_CHECKING([for Python include path]) -- if test -z "$PYTHON_CPPFLAGS"; then -- python_path=`$PYTHON -c "import distutils.sysconfig; \ -- print distutils.sysconfig.get_python_inc();"` -- if test -n "${python_path}"; then -- python_path="-I$python_path" -- fi -- PYTHON_CPPFLAGS=$python_path -- fi -- AC_MSG_RESULT([$PYTHON_CPPFLAGS]) -- AC_SUBST([PYTHON_CPPFLAGS]) -- -- # -- # Check for Python library path -- # -- AC_MSG_CHECKING([for Python library path]) -- if test -z "$PYTHON_LDFLAGS"; then -- # (makes two attempts to ensure we've got a version number -- # from the interpreter) -- py_version=`$PYTHON -c "from distutils.sysconfig import *; \ -- from string import join; \ -- print join(get_config_vars('VERSION'))"` -- if test "$py_version" == "[None]"; then -- if test -n "$PYTHON_VERSION"; then -- py_version=$PYTHON_VERSION -- else -- py_version=`$PYTHON -c "import sys; \ -- print sys.version[[:3]]"` -- fi -- fi -- -- PYTHON_LDFLAGS=`$PYTHON -c "from distutils.sysconfig import *; \ -- from string import join; \ -- print '-L' + get_python_lib(0,1), \ -- '-lpython';"`$py_version -- fi -- AC_MSG_RESULT([$PYTHON_LDFLAGS]) -- AC_SUBST([PYTHON_LDFLAGS]) -- -- # -- # Check for site packages -- # -- AC_MSG_CHECKING([for Python site-packages path]) -- if test -z "$PYTHON_SITE_PKG"; then -- PYTHON_SITE_PKG=`$PYTHON -c "import distutils.sysconfig; \ -- print distutils.sysconfig.get_python_lib(0,0);"` -- fi -- AC_MSG_RESULT([$PYTHON_SITE_PKG]) -- AC_SUBST([PYTHON_SITE_PKG]) -- -- # -- # libraries which must be linked in when embedding -- # -- AC_MSG_CHECKING(python extra libraries) -- if test -z "$PYTHON_EXTRA_LIBS"; then -- PYTHON_EXTRA_LIBS=`$PYTHON -c "import distutils.sysconfig; \ -- conf = distutils.sysconfig.get_config_var; \ -- print conf('LOCALMODLIBS'), conf('LIBS')"` -- fi -- AC_MSG_RESULT([$PYTHON_EXTRA_LIBS]) -- AC_SUBST(PYTHON_EXTRA_LIBS) -- -- # -- # linking flags needed when embedding -- # -- AC_MSG_CHECKING(python extra linking flags) -- if test -z "$PYTHON_EXTRA_LDFLAGS"; then -- PYTHON_EXTRA_LDFLAGS=`$PYTHON -c "import distutils.sysconfig; \ -- conf = distutils.sysconfig.get_config_var; \ -- print conf('LINKFORSHARED')"` -- fi -- AC_MSG_RESULT([$PYTHON_EXTRA_LDFLAGS]) -- AC_SUBST(PYTHON_EXTRA_LDFLAGS) -- -- # -- # final check to see if everything compiles alright -- # -- AC_MSG_CHECKING([consistency of all components of python development environment]) -- AC_LANG_PUSH([C]) -- # save current global flags -- LIBS="$ac_save_LIBS $PYTHON_LDFLAGS" -- CPPFLAGS="$ac_save_CPPFLAGS $PYTHON_CPPFLAGS" -- AC_TRY_LINK([ -- #include -- ],[ -- Py_Initialize(); -- ],[pythonexists=yes],[pythonexists=no]) -- -- AC_MSG_RESULT([$pythonexists]) -- -- if test ! "$pythonexists" = "yes"; then -- AC_MSG_ERROR([ -- Could not link test program to Python. Maybe the main Python library has been -- installed in some non-standard library path. If so, pass it to configure, -- via the LDFLAGS environment variable. 
-- Example: ./configure LDFLAGS="-L/usr/non-standard-path/python/lib" -- ============================================================================ -- ERROR! -- You probably have to install the development version of the Python package -- for your distribution. The exact name of this package varies among them. -- ============================================================================ -- ]) -- PYTHON_VERSION="" -- fi -- AC_LANG_POP -- # turn back to default flags -- CPPFLAGS="$ac_save_CPPFLAGS" -- LIBS="$ac_save_LIBS" -- -- # -- # all done! -- # --]) ---- a/libraries/libapparmor/src/Makefile.am -+++ b/libraries/libapparmor/src/Makefile.am -@@ -23,10 +23,10 @@ noinst_HEADERS = grammar.h parser.h scan - - libapparmor_la_SOURCES = grammar.y libaalogparse.c kernel_interface.c scanner.c - libapparmor_la_LDFLAGS = -version-info 1:2:0 -XCClinker -dynamic \ -- -Wl,--version-script=$(top_srcdir)/src/libapparmor.map -Wl,-soname=libapparmor.so.1 -+ -Wl,--version-script=$(srcdir)/libapparmor.map -Wl,-soname=libapparmor.so.1 - - libimmunix_la_SOURCES = kernel_interface.c libimmunix_warning.c --libimmunix_la_LDFLAGS = -version-info 1:2:0 -Wl,--version-script=$(top_srcdir)/src/libapparmor.map -Wl,-soname=libimmunix.so.1 -+libimmunix_la_LDFLAGS = -version-info 1:2:0 -Wl,--version-script=$(srcdir)/libapparmor.map -Wl,-soname=libimmunix.so.1 - - tst_aalogmisc_SOURCES = tst_aalogmisc.c - tst_aalogmisc_LDADD = .libs/libapparmor.a ---- a/libraries/libapparmor/swig/perl/Makefile.PL.in -+++ /dev/null -@@ -1,17 +0,0 @@ --#!/usr/bin/perl -w -- --use ExtUtils::MakeMaker; -- --use vars qw($CFLAGS $OBJECT $VERSION $OPTIMIZE); -- --WriteMakefile( -- 'NAME' => 'LibAppArmor', -- 'MAKEFILE' => 'Makefile.perl', -- 'FIRST_MAKEFILE' => 'Makefile.perl', -- 'ABSTRACT' => q[Perl interface to AppArmor] , -- 'VERSION' => q[@VERSION@], -- 'INC' => q[-I@top_srcdir@/src @CFLAGS@], -- 'LIBS' => q[-L@top_builddir@/src/.libs/ -lapparmor @LIBS@], -- 'OBJECT' => 'libapparmor_wrap.o', # $(OBJ_EXT) --) ; -- ---- a/libraries/libapparmor/swig/perl/Makefile.am -+++ b/libraries/libapparmor/swig/perl/Makefile.am -@@ -1,32 +1,21 @@ --EXTRA_DIST =Makefile.PL libapparmor_wrap.c LibAppArmor.pm examples/*.pl -+EXTRA_DIST = libapparmor_wrap.c LibAppArmor.pm examples/*.pl - - if HAVE_PERL --noinst_DATA =LibAppArmor.so -+vendorarchdir = $(VENDOR_ARCH_PERL) -+vendorarch_DATA = LibAppArmor.pm -+ -+apparmordir = $(vendorarchdir)/auto/LibAppArmor -+apparmor_LTLIBRARIES = LibAppArmor.la -+ -+LibAppArmor_la_LDFLAGS = -module -no-undefined -avoid-version -+LibAppArmor_la_SOURCES = libapparmor_wrap.c -+LibAppArmor_la_LIBADD = $(top_builddir)/libraries/libapparmor/src/.libs/libapparmor.la -+ -+LibAppArmor_la_CFLAGS = -I$(top_srcdir)/libraries/libapparmor/src $(CFLAGS) $(PERL_CCFLAGS) - - libapparmor_wrap.c: $(srcdir)/../SWIG/libapparmor.i - $(SWIG) -perl -I$(srcdir)/../../src -module LibAppArmor -o $@ $(srcdir)/../SWIG/libapparmor.i -+LibAppArmor.pm: libapparmor_wrap.c - - MOSTLYCLEANFILES=libapparmor_wrap.c LibAppArmor.pm -- --Makefile.perl: Makefile.PL -- $(PERL) $< PREFIX=$(prefix) MAKEFILE=$@ -- sed -ie 's/^LD_RUN_PATH.*//g' Makefile.perl -- --LibAppArmor.so: libapparmor_wrap.c Makefile.perl -- if test ! -f libapparmor_wrap.c; then cp $(srcdir)/libapparmor_wrap.c . 
; fi -- $(MAKE) -fMakefile.perl -- if test $(top_srcdir) != $(top_builddir) ; then rm -f libapparmor_wrap.c ; fi -- --install-exec-local: Makefile.perl -- $(MAKE) -fMakefile.perl install_vendor -- --# sadly there is no make uninstall for perl --#uninstall-local: Makefile.perl --#$(MAKE) -fMakefile.perl uninstall -- --clean-local: -- if test -f Makefile.perl; then $(MAKE) -fMakefile.perl realclean; fi --#rm -f Makefile.perl Makefile.perl.old -- rm -f *.so # *.o -- - endif ---- a/libraries/libapparmor/swig/python/Makefile.am -+++ b/libraries/libapparmor/swig/python/Makefile.am -@@ -9,7 +9,6 @@ libapparmor_wrap.c: $(srcdir)/../SWIG/li - MOSTLYCLEANFILES=libapparmor_wrap.c __init__.py - - all-local: libapparmor_wrap.c setup.py -- if test ! -f libapparmor_wrap.c; then cp $(srcdir)/libapparmor_wrap.c . ; fi - $(PYTHON) setup.py build - - install-exec-local: -@@ -18,6 +17,5 @@ install-exec-local: - clean-local: - if test -x "$(PYTHON)"; then $(PYTHON) setup.py clean; fi - rm -rf build -- if test $(top_srcdir) != $(top_builddir) ; then rm -f libapparmor_wrap.c ; fi - - endif ---- a/libraries/libapparmor/swig/ruby/Makefile.am -+++ b/libraries/libapparmor/swig/ruby/Makefile.am -@@ -1,28 +1,17 @@ - if HAVE_RUBY - --EXTRA_DIST = extconf.rb LibAppArmor_wrap.c examples/*.rb --noinst_DATA = LibAppArmor.so -+EXTRA_DIST = LibAppArmor_wrap.c examples/*.rb - --LibAppArmor_wrap.c : $(srcdir)/../SWIG/libapparmor.i -- $(SWIG) -ruby -module LibAppArmor -I$(top_srcdir)/src -o $@ $(srcdir)/../SWIG/libapparmor.i -- --MOSTLYCLEANFILES=LibAppArmor_wrap.c -+rbexec_LTLIBRARIES = LibAppArmor.la - --Makefile.ruby: extconf.rb -- PREFIX=$(prefix) $(RUBY) $< --with-LibAppArmor-include=$(top_srcdir)/src -+LibAppArmor_la_LDFLAGS = -module -no-undefined -avoid-version -+LibAppArmor_la_SOURCES = LibAppArmor_wrap.c -+LibAppArmor_la_LIBADD = $(builddir)/../../src/.libs/libapparmor.la -+LibAppArmor_la_CFLAGS = -I$(top_srcdir)/libraries/libapparmor/src $(CFLAGS) - --LibAppArmor.so: LibAppArmor_wrap.c Makefile.ruby -- $(MAKE) -fMakefile.ruby -- --install-exec-local: Makefile.ruby -- $(MAKE) -fMakefile.ruby install -+LibAppArmor_wrap.c : $(srcdir)/../SWIG/libapparmor.i -+ $(SWIG) -ruby -module LibAppArmor -I$(srcdir)/../../src -o $@ $(srcdir)/../SWIG/libapparmor.i - --#uninstall --#./lib/ruby/site_ruby/1.8/i686-linux/LibAppArmor.so -- --clean-local: -- if test -f Makefile.ruby; then $(MAKE) -fMakefile.ruby clean; fi -- rm -f Makefile.ruby Makefile.new -- rm -f *.o *.so *.log -+MOSTLYCLEANFILES=LibAppArmor_wrap.c - - endif ---- a/libraries/libapparmor/swig/ruby/extconf.rb -+++ /dev/null -@@ -1,37 +0,0 @@ --#!/usr/bin/env ruby -- --require 'mkmf' -- --# hack 1: ruby black magic to write a Makefile.new instead of a Makefile --alias open_orig open --def open(path, mode=nil, perm=nil) -- path = 'Makefile.new' if path == 'Makefile' -- if block_given? 
-- open_orig(path, mode, perm) { |io| yield(io) } -- else -- open_orig(path, mode, perm) -- end --end -- --if ENV['PREFIX'] -- prefix = CONFIG['prefix'] -- %w[ prefix sitedir datadir infodir mandir oldincludedir ].each do |key| -- CONFIG[key] = CONFIG[key].sub(/#{prefix}/, ENV['PREFIX']) -- end --end -- --dir_config('LibAppArmor') --if find_library('apparmor', 'parse_record', '../../src/.libs') and -- have_header('aalogparse.h') -- create_makefile('LibAppArmor') -- -- # hack 2: strip all rpath references -- open('Makefile.ruby', 'w') do |out| -- IO.foreach('Makefile.new') do |line| -- out.puts line.gsub(/-Wl,-R'[^']*'/, '') -- end -- end --else -- puts 'apparmor lib not found' --end -- ---- a/libraries/libapparmor/testsuite/Makefile.am -+++ b/libraries/libapparmor/testsuite/Makefile.am -@@ -2,7 +2,7 @@ SUBDIRS = lib config libaalogparse.test - PACKAGE = libaalogparse - AUTOMAKE_OPTIONS = dejagnu - --INCLUDES = -I. -I$(top_srcdir)/src -+INCLUDES = -I. -I$(srcdir)/../src - - AM_CPPFLAGS = $(DEBUG_FLAGS) -DLOCALEDIR=\"${localedir}\" - AM_CFLAGS = -Wall ---- /dev/null -+++ b/m4/ac_pod2man.m4 -@@ -0,0 +1,16 @@ -+AC_DEFUN([PROG_POD2MAN],[ -+ AC_CHECK_PROG(POD2MAN,pod2man,pod2man,no) -+ if test "$POD2MAN" = "no"; then -+ AC_MSG_ERROR([ -+The pod2man program was not found in the default path. pod2man is part of -+Perl, which can be retrieved from: -+ -+ http://www.perl.com/ -+ -+The latest version at this time is 5.6.1; it is available packaged as the -+following archive: -+ -+ http://www.perl.com/CPAN/src/stable.tar.gz -+]) -+ fi -+]) ---- /dev/null -+++ b/m4/ac_python_devel.m4 -@@ -0,0 +1,209 @@ -+AC_DEFUN([AC_PYTHON_DEVEL],[ -+ # -+ # Allow the use of a (user set) custom python version -+ # -+ AC_ARG_VAR([PYTHON_VERSION],[The installed Python -+ version to use, for example '2.3'. This string -+ will be appended to the Python interpreter -+ canonical name.]) -+ -+ AC_PATH_PROG([PYTHON],[python[$PYTHON_VERSION]]) -+ if test -z "$PYTHON"; then -+ AC_MSG_ERROR([Cannot find python$PYTHON_VERSION in your system path]) -+ PYTHON_VERSION="" -+ fi -+ -+ # -+ # Check for a version of Python >= 2.1.0 -+ # -+ AC_MSG_CHECKING([for a version of Python >= '2.1.0']) -+ ac_supports_python_ver=`$PYTHON -c "import sys, string; \ -+ ver = string.split(sys.version)[[0]]; \ -+ print ver >= '2.1.0'"` -+ if test "$ac_supports_python_ver" != "True"; then -+ if test -z "$PYTHON_NOVERSIONCHECK"; then -+ AC_MSG_RESULT([no]) -+ AC_MSG_FAILURE([ -+This version of the AC@&t@_PYTHON_DEVEL macro -+doesn't work properly with versions of Python before -+2.1.0. You may need to re-run configure, setting the -+variables PYTHON_CPPFLAGS, PYTHON_LDFLAGS, PYTHON_SITE_PKG, -+PYTHON_EXTRA_LIBS and PYTHON_EXTRA_LDFLAGS by hand. -+Moreover, to disable this check, set PYTHON_NOVERSIONCHECK -+to something else than an empty string. -+]) -+ else -+ AC_MSG_RESULT([skip at user request]) -+ fi -+ else -+ AC_MSG_RESULT([yes]) -+ fi -+ -+ # -+ # if the macro parameter ``version'' is set, honour it -+ # -+ if test -n "$1"; then -+ AC_MSG_CHECKING([for a version of Python $1]) -+ ac_supports_python_ver=`$PYTHON -c "import sys, string; \ -+ ver = string.split(sys.version)[[0]]; \ -+ print ver $1"` -+ if test "$ac_supports_python_ver" = "True"; then -+ AC_MSG_RESULT([yes]) -+ else -+ AC_MSG_RESULT([no]) -+ AC_MSG_ERROR([this package requires Python $1. -+If you have it installed, but it isn't the default Python -+interpreter in your system path, please pass the PYTHON_VERSION -+variable to configure. See ``configure --help'' for reference. 
-+]) -+ PYTHON_VERSION="" -+ fi -+ fi -+ -+ # -+ # Check if you have distutils, else fail -+ # -+ AC_MSG_CHECKING([for the distutils Python package]) -+ ac_distutils_result=`$PYTHON -c "import distutils" 2>&1` -+ if test -z "$ac_distutils_result"; then -+ AC_MSG_RESULT([yes]) -+ else -+ AC_MSG_RESULT([no]) -+ AC_MSG_ERROR([cannot import Python module "distutils". -+Please check your Python installation. The error was: -+$ac_distutils_result]) -+ PYTHON_VERSION="" -+ fi -+ -+ # -+ # Check for Python include path -+ # -+ AC_MSG_CHECKING([for Python include path]) -+ if test -z "$PYTHON_CPPFLAGS"; then -+ python_path=`$PYTHON -c "import distutils.sysconfig; \ -+ print distutils.sysconfig.get_python_inc();"` -+ if test -n "${python_path}"; then -+ python_path="-I$python_path" -+ fi -+ PYTHON_CPPFLAGS=$python_path -+ fi -+ AC_MSG_RESULT([$PYTHON_CPPFLAGS]) -+ AC_SUBST([PYTHON_CPPFLAGS]) -+ -+ # -+ # Check for Python library path -+ # -+ AC_MSG_CHECKING([for Python library path]) -+ if test -z "$PYTHON_LDFLAGS"; then -+ # (makes two attempts to ensure we've got a version number -+ # from the interpreter) -+ py_version=`$PYTHON -c "from distutils.sysconfig import *; \ -+ from string import join; \ -+ print join(get_config_vars('VERSION'))"` -+ if test "$py_version" == "[None]"; then -+ if test -n "$PYTHON_VERSION"; then -+ py_version=$PYTHON_VERSION -+ else -+ py_version=`$PYTHON -c "import sys; \ -+ print sys.version[[:3]]"` -+ fi -+ fi -+ -+ PYTHON_LDFLAGS=`$PYTHON -c "from distutils.sysconfig import *; \ -+ from string import join; \ -+ print '-L' + get_python_lib(0,1), \ -+ '-lpython';"`$py_version -+ fi -+ AC_MSG_RESULT([$PYTHON_LDFLAGS]) -+ AC_SUBST([PYTHON_LDFLAGS]) -+ -+ # -+ # Check for site packages -+ # -+ AC_MSG_CHECKING([for Python site-packages path]) -+ if test -z "$PYTHON_SITE_PKG"; then -+ PYTHON_SITE_PKG=`$PYTHON -c "import distutils.sysconfig; \ -+ print distutils.sysconfig.get_python_lib(0,0);"` -+ fi -+ AC_MSG_RESULT([$PYTHON_SITE_PKG]) -+ AC_SUBST([PYTHON_SITE_PKG]) -+ -+ # -+ # libraries which must be linked in when embedding -+ # -+ AC_MSG_CHECKING(python extra libraries) -+ if test -z "$PYTHON_EXTRA_LIBS"; then -+ PYTHON_EXTRA_LIBS=`$PYTHON -c "import distutils.sysconfig; \ -+ conf = distutils.sysconfig.get_config_var; \ -+ print conf('LOCALMODLIBS'), conf('LIBS')"` -+ fi -+ AC_MSG_RESULT([$PYTHON_EXTRA_LIBS]) -+ AC_SUBST(PYTHON_EXTRA_LIBS) -+ -+ # -+ # linking flags needed when embedding -+ # -+ AC_MSG_CHECKING(python extra linking flags) -+ if test -z "$PYTHON_EXTRA_LDFLAGS"; then -+ PYTHON_EXTRA_LDFLAGS=`$PYTHON -c "import distutils.sysconfig; \ -+ conf = distutils.sysconfig.get_config_var; \ -+ print conf('LINKFORSHARED')"` -+ fi -+ AC_MSG_RESULT([$PYTHON_EXTRA_LDFLAGS]) -+ AC_SUBST(PYTHON_EXTRA_LDFLAGS) -+ -+ # -+ # final check to see if everything compiles alright -+ # -+ AC_MSG_CHECKING([consistency of all components of python development environment]) -+ AC_LANG_PUSH([C]) -+ # save current global flags -+ LIBS="$ac_save_LIBS $PYTHON_LDFLAGS" -+ CPPFLAGS="$ac_save_CPPFLAGS $PYTHON_CPPFLAGS" -+ AC_TRY_LINK([ -+ #include -+ ],[ -+ Py_Initialize(); -+ ],[pythonexists=yes],[pythonexists=no]) -+ -+ AC_MSG_RESULT([$pythonexists]) -+ -+ if test ! "$pythonexists" = "yes"; then -+ AC_MSG_ERROR([ -+ Could not link test program to Python. Maybe the main Python library has been -+ installed in some non-standard library path. If so, pass it to configure, -+ via the LDFLAGS environment variable. 
-+ Example: ./configure LDFLAGS="-L/usr/non-standard-path/python/lib" -+ ============================================================================ -+ ERROR! -+ You probably have to install the development version of the Python package -+ for your distribution. The exact name of this package varies among them. -+ ============================================================================ -+ ]) -+ PYTHON_VERSION="" -+ fi -+ AC_LANG_POP -+ # turn back to default flags -+ CPPFLAGS="$ac_save_CPPFLAGS" -+ LIBS="$ac_save_LIBS" -+ -+ # -+ # all done! -+ # -+]) -+ -+ -+AC_DEFUN([PROG_PYTHON],[ -+AC_ARG_WITH(python, -+[ --with-python enable python support for libapparmor [[default=auto]]], -+[with_python=$withval], [with_python=auto]) -+ -+if test "$with_python" != "no"; then -+ AM_PATH_PYTHON(,, [no]) -+ if test "$with_python" = "yes" -a "$PYTHON" = "no"; then -+ AC_MSG_ERROR([--with-python was given but python could not be found]) -+ elif test "$PYTHON" = "no"; then -+ AC_MSG_RESULT([ --- python support for libappamor not available]) -+ fi -+fi -+]) ---- /dev/null -+++ b/m4/am_path_apxs.m4 -@@ -0,0 +1,12 @@ -+AC_DEFUN([AM_PATH_APXS],[ -+ m4_define_default([_AM_APXS_LIST], [apxs2 apxs]) -+ -+ AC_PATH_PROGS(APXS, _AM_APXS_LIST, :, [PATH /usr/sbin /usr/bin]) -+ -+ if test "$APXS" = :; then -+ m4_default([$3], [AC_MSG_ERROR([no apache extension tool found])]) -+ else -+ echo -n -+ $2 -+ fi -+]) ---- /dev/null -+++ b/m4/am_path_perl.m4 -@@ -0,0 +1,25 @@ -+AC_DEFUN([AM_PATH_PERL],[ -+ m4_define_default([_AM_PERL_LIST], [perl perl6 perl5]) -+ -+ AC_PATH_PROGS(PERL, _AM_PERL_LIST, :,) -+ -+ if test "$PERL" = :; then -+ m4_default([$3], [AC_MSG_ERROR([no perl interpreter found])]) -+ else -+ VENDOR_PERL=`$PERL -V::vendorlib:` -+ VENDOR_PERL=`eval echo $VENDOR_PERL` -+ AC_MSG_RESULT([Perl vendirlib is $VENDOR_PERL]) -+ AC_SUBST(VENDOR_PERL) -+ -+ VENDOR_ARCH_PERL=`$PERL -V::vendorarch:` -+ VENDOR_ARCH_PERL=`eval echo $VENDOR_ARCH_PERL` -+ AC_MSG_RESULT([Perl vendorarch is $VENDOR_ARCH_PERL]) -+ AC_SUBST(VENDOR_ARCH_PERL) -+ -+ PERL_CCFLAGS=`$PERL -MExtUtils::Embed -e ccopts` -+ AC_SUBST(PERL_CCFLAGS) -+ -+ $2 -+ fi -+]) -+ ---- /dev/null -+++ b/m4/am_path_ruby.m4 -@@ -0,0 +1,115 @@ -+# -+# AM_PATH_RUBY([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) -+# --------------------------------------------------------------------------- -+# Adds support for distributing Ruby modules and packages. To -+# install modules, copy them to $(rubydir), using the ruby_RUBY -+# automake variable. To install a package with the same name as the -+# automake package, install to $(pkgrubydir), or use the -+# pkgruby_RUBY automake variable. -+# -+# The variables $(rbexecdir) and $(pkgrbexecdir) are provided as -+# locations to install ruby extension modules (shared libraries). -+# Another macro is required to find the appropriate flags to compile -+# extension modules. -+# -+AC_DEFUN([AM_PATH_RUBY], -+ [ -+ dnl Find a Ruby interpreter. -+ m4_define_default([_AM_RUBY_INTERPRETER_LIST], -+ [ruby ruby1.8 ruby1.7 ruby1.6]) -+ -+ m4_if([$1],[],[ -+ dnl No version check is needed. -+ # Find any Ruby interpreter. -+ if test -z "$RUBY"; then -+ AC_PATH_PROGS([RUBY], _AM_RUBY_INTERPRETER_LIST, :) -+ fi -+ am_display_RUBY=ruby -+ ], [ -+ dnl A version check is needed. -+ if test -n "$RUBY"; then -+ # If the user set $RUBY, use it and don't search something else. 
-+ #AC_MSG_CHECKING([whether $RUBY version >= $1]) -+ #AM_RUBY_CHECK_VERSION([$RUBY], [$1], -+ # [AC_MSG_RESULT(yes)], -+ # [AC_MSG_ERROR(too old)]) -+ am_display_RUBY=$RUBY -+ else -+ # Otherwise, try each interpreter until we find one that satisfies -+ # VERSION. -+ AC_CACHE_CHECK([for a Ruby interpreter with version >= $1], -+ [am_cv_pathless_RUBY],[ -+ for am_cv_pathless_RUBY in _AM_RUBY_INTERPRETER_LIST none; do -+ test "$am_cv_pathless_RUBY" = none && break -+ #AM_RUBY_CHECK_VERSION([$am_cv_pathless_RUBY], [$1], [break]) -+ [], [$1], [break]) -+ done]) -+ # Set $RUBY to the absolute path of $am_cv_pathless_RUBY. -+ if test "$am_cv_pathless_RUBY" = none; then -+ RUBY=: -+ else -+ AC_PATH_PROG([RUBY], [$am_cv_pathless_RUBY]) -+ fi -+ am_display_RUBY=$am_cv_pathless_RUBY -+ fi -+ ]) -+ -+ if test "$RUBY" = :; then -+ dnl Run any user-specified action, or abort. -+ m4_default([$3], [AC_MSG_ERROR([no suitable Ruby interpreter found])]) -+ else -+ -+ dnl Query Ruby for its version number. Getting [:3] seems to be -+ dnl the best way to do this; it's what "site.py" does in the standard -+ dnl library. -+ -+ AC_CACHE_CHECK([for $am_display_RUBY version], [am_cv_ruby_version], -+ [am_cv_ruby_version=`$RUBY -e "print RUBY_VERSION"`]) -+ AC_SUBST([RUBY_VERSION], [$am_cv_ruby_version]) -+ -+ dnl Use the values of $prefix and $exec_prefix for the corresponding -+ dnl values of RUBY_PREFIX and RUBY_EXEC_PREFIX. These are made -+ dnl distinct variables so they can be overridden if need be. However, -+ dnl general consensus is that you shouldn't need this ability. -+ -+ AC_SUBST([RUBY_PREFIX], ['${prefix}']) -+ AC_SUBST([RUBY_EXEC_PREFIX], ['${exec_prefix}']) -+ -+ dnl At times (like when building shared libraries) you may want -+ dnl to know which OS platform Ruby thinks this is. -+ -+ AC_CACHE_CHECK([for $am_display_RUBY platform], [am_cv_ruby_platform], -+ [am_cv_ruby_platform=`$RUBY -e "print RUBY_PLATFORM"`]) -+ AC_SUBST([RUBY_PLATFORM], [$am_cv_ruby_platform]) -+ -+ -+ dnl Set up 4 directories: -+ dnl rubydir -- where to install ruby scripts. -+ AC_CACHE_CHECK([for $am_display_RUBY script directory], -+ [am_cv_ruby_rubydir], -+ [am_cv_ruby_rubydir=`$RUBY -rrbconfig -e "drive = File::PATH_SEPARATOR == ';' ? /\A\w:/ : /\A/; prefix = Regexp.new('\\A' + Regexp.quote(Config::CONFIG[['prefix']])); \\$prefix = Config::CONFIG[['prefix']].sub(drive, ''); \\$archdir = Config::CONFIG[['archdir']].sub(prefix, '\\$(prefix)').sub(drive, ''); print \\$archdir;"`]) -+ AC_SUBST([rubydir], [$am_cv_ruby_rubydir]) -+ -+ dnl pkgrubydir -- $PACKAGE directory under rubydir. -+ AC_SUBST([pkgrubydir], [\${rubydir}/$PACKAGE]) -+ -+ dnl rbexecdir -- directory for installing ruby extension modules -+ dnl (shared libraries) -+ AC_CACHE_CHECK([for $am_display_RUBY extension module directory], -+ [am_cv_ruby_rbexecdir], -+ [am_cv_ruby_rbexecdir=`$RUBY -rrbconfig -e "drive = File::PATH_SEPARATOR == ';' ? 
/\A\w:/ : /\A/; prefix = Regexp.new('\\A' + Regexp.quote(Config::CONFIG[['prefix']])); \\$prefix = Config::CONFIG[['prefix']].sub(drive, ''); \\$sitearchdir = Config::CONFIG[['sitearchdir']].sub(prefix, '\\$(prefix)').sub(drive, ''); print \\$sitearchdir;" 2>/dev/null || echo "${RUBY_EXEC_PREFIX}/local/lib/site_ruby/${RUBY_VERSION}/${RUBY_PLATFORM}"`]) -+ AC_SUBST([rbexecdir], [$am_cv_ruby_rbexecdir]) -+ -+ RUBY_INCLUDE_DIR=`$RUBY -r rbconfig -e 'puts Config::CONFIG[["archdir"]]'` -+ RUBY_INCLUDES=" -I $RUBY_INCLUDE_DIR" -+ AC_SUBST([RUBY_INCLUDES]) -+ -+ dnl pkgrbexecdir -- $(rbexecdir)/$(PACKAGE) -+ -+ AC_SUBST([pkgrbexecdir], [\${rbexecdir}/$PACKAGE]) -+ -+ dnl Run any user-specified action. -+ $2 -+ fi -+ -+]) ---- /dev/null -+++ b/m4/wxwidgets.m4 -@@ -0,0 +1,37 @@ -+AC_DEFUN([WXTEST], -+[ -+ AC_REQUIRE([AC_PROG_AWK]) -+ WXCONFIG=wx-config -+ AC_ARG_WITH(wx-config, -+ [[ --with-wx-config=FILE Use the given path to wx-config when determining -+ wxWidgets configuration; defaults to "wx-config"]], -+ [ -+ if test "$withval" != "yes" -a "$withval" != ""; then -+ WXCONFIG=$withval -+ fi -+ ]) -+ -+ wxversion=0 -+ -+ -+ AC_MSG_CHECKING([wxWidgets version]) -+ if wxversion=`$WXCONFIG --version`; then -+ AC_MSG_RESULT([$wxversion]) -+ # Verify minimus requires -+ vers=[`echo $wxversion | $AWK 'BEGIN { FS = "."; } { printf "% d", ($''1 * 1000 + $''2) * 1000 + $''3;}'`] -+ if test -n "$vers" && test "$vers" -ge 2006000; then -+ WX_CPPFLAGS="`$WXCONFIG --cppflags`" -+ WX_CXXFLAGS="`$WXCONFIG --cxxflags | sed -e 's/-fno-exceptions//'`" -+ WX_LIBS="`$WXCONFIG --libs`" -+ AC_SUBST(WX_CXXFLAGS) -+ AC_SUBST(WX_CPPFLAGS) -+ AC_SUBST(WX_LIBS) -+ else -+ AC_MSG_ERROR([wxWidgets 2.6.0 or newer is required]) -+ fi -+ -+ else -+ AC_MSG_RESULT([not found]) -+ AC_MSG_ERROR([wxWidgets is required. Try --with-wx-config.]) -+ fi]) -+ ---- /dev/null -+++ b/parser/Makefile.am -@@ -0,0 +1,81 @@ -+SUBDIRS = libapparmor_re po -+ -+lib_apparmor_DATA = rc.apparmor.functions -+etc_apparmor_DATA = subdomain.conf -+noinst_DATA = techdoc.pdf techdoc/techdoc.html techdoc/techdoc.css \ -+ techdoc.txt apparmor.d.5.html apparmor.7.html \ -+ apparmor_parser.8.html subdomain.conf.5.html \ -+ apparmor.vim.5.html -+ -+real_sbin_PROGRAMS = apparmor_parser -+dist_man_MANS = apparmor.d.5 apparmor.7 apparmor_parser.8 subdomain.conf.5 \ -+ apparmor.vim.5 -+ -+BUILT_SOURCES = parser_lex.c parser_yacc.c af_names.h cap_names.h -+AM_YFLAGS = -d -+AM_CFLAGS = -DLOCALEDIR=\"$(localedir)\" -+apparmor_parser_SOURCES = parser_yacc.y parser_lex.l parser_include.c \ -+ parser_interface.c parser_main.c parser_misc.c \ -+ parser_merge.c parser_symtab.c parser_regex.c \ -+ parser_variable.c parser_policy.c parser_alias.c -+ -+libstdc++.a: -+ ln -s `g++ -print-file-name=libstdc++.a` -+ -+apparmor_parser_LDFLAGS = -static-libgcc -+apparmor_parser_LDADD = $(top_srcdir)/libraries/libapparmor/src/libapparmor.la \ -+ $(srcdir)/libapparmor_re/libapparmor_re.la \ -+ libstdc++.a -lpcre -+ -+install-data-local: -+ $(mkinstalldirs) $(DESTDIR)/var/lib/apparmor -+ -+CLEANFILES = $(BUILT_SOURCES) $(doc_DATA) $(dist_man_MANS) -+ -+# These are the families that it doesn't make sense for apparmor to mediate. -+# We use PF_ here since that is what is required in bits/socket.h, but we will -+# rewrite these as AF_. 
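To illustrate what the af_names.h rule further below produces (an illustrative sketch, not output copied from any real build): for a glibc line such as "#define PF_INET 2", the sed rewrite emits an entry of the form

    #ifndef AF_INET
    # define AF_INET 2
    #endif
    AA_GEN_NET_ENT("inet", AF_INET)

while the families listed in FILTER_FAMILIES are dropped before the rewrite, so they never appear in the generated table.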
-+FILTER_FAMILIES=PF_RXRPC PF_MAX PF_UNSPEC PF_UNIX PF_LOCAL PF_NETLINK PF_LLC PF_IUCV PF_TIPC PF_CAN PF_ISDN PF_PHONET -+ -+__FILTER=$(shell echo $(strip $(FILTER_FAMILIES)) | sed -e 's/ /\\\|/g') -+ -+af_names.h: /usr/include/bits/socket.h -+ LC_ALL=C sed -n -e '/$(__FILTER)/d' -e "s/^\#define[ \\t]\\+PF_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/#ifndef AF_\\1\\n# define AF_\\1 \\2\\n#endif\\nAA_GEN_NET_ENT(\"\\L\\1\", \\UAF_\\1)\\n/p" $< > $@ -+ LC_ALL=C sed -n -e "s/^\#define[ \\t]\\+PF_MAX[ \\t]\\+\\([0-9]\\+\\)[ \\t]\\+.*/#define AA_AF_MAX \\1\n/p" $< >> $@ -+ cat $@ -+ -+cap_names.h: /usr/include/linux/capability.h -+ LC_ALL=C sed -n -e "/CAP_EMPTY_SET/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9xa-f]\\+\\)\\(.*\\)\$$/\{\"\\L\\1\", \\UCAP_\\1\},/p" $< > $@ -+ -+.tex.pdf: -+ while pdflatex $< || exit 1; \ -+ grep -q "Label(s) may have changed" techdoc.log; \ -+ do :; done -+ -+techdoc/techdoc.css : -+techdoc/%.html : %.tex -+#.tex.html: -+ latex2html -show_section_numbers -split 0 -noinfo -nonavigation -noaddress $< -+ -+%.txt : techdoc/%.html -+ w3m -dump $< > $@ -+ -+PODARGS = --center=AppArmor --release=NOVELL/SUSE -+ -+pod2man = pod2man $(PODARGS) --section $(subst .,,$(suffix $<)) $< > $@ -+ -+.pod.5: -+ $(pod2man) -+.pod.7: -+ $(pod2man) -+.pod.8: -+ $(pod2man) -+ -+pod2html = pod2html --header --css ../common/apparmor.css --infile=$< --outfile=$@ -+ -+%.5.html : %.pod -+ $(pod2html) -+%.7.html : %.pod -+ $(pod2html) -+%.8.html : %.pod -+ $(pod2html) ---- /dev/null -+++ b/parser/libapparmor_re/Makefile.am -@@ -0,0 +1,4 @@ -+ -+noinst_LTLIBRARIES = libapparmor_re.la -+ -+libapparmor_re_la_SOURCES = regexp.yy ---- a/parser/libapparmor_re/regexp.y -+++ /dev/null -@@ -1,3082 +0,0 @@ --/* -- * regexp.y -- Regular Expression Matcher Generator -- * (C) 2006, 2007 Andreas Gruenbacher -- * -- * Implementation based on the Lexical Analysis chapter of: -- * Alfred V. Aho, Ravi Sethi, Jeffrey D. Ullman: -- * Compilers: Principles, Techniques, and Tools (The "Dragon Book"), -- * Addison-Wesley, 1986. -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License version 2 as -- * published by the Free Software Foundation. -- * -- * See http://www.gnu.org for more details. -- */ -- --%{ -- /* #define DEBUG_TREE */ -- -- #include -- #include -- #include -- #include -- #include -- #include -- #include -- #include -- -- using namespace std; -- -- typedef unsigned char uchar; -- typedef set Chars; -- -- ostream& operator<<(ostream& os, uchar c); -- -- /* Compute the union of two sets. */ -- template -- set operator+(const set& a, const set& b) -- { -- set c(a); -- c.insert(b.begin(), b.end()); -- return c; -- } -- -- /** -- * When creating DFAs from regex trees, a DFA state is constructed from -- * a set of important nodes in the syntax tree. This includes AcceptNodes, -- * which indicate that when a match ends in a particular state, the -- * regular expressions that the AcceptNode belongs to match. -- */ -- class ImportantNode; -- typedef set NodeSet; -- -- /** -- * Out-edges from a state to another: we store the follow-set of Nodes -- * for each input character that is not a default match in -- * cases (i.e., following a CharNode or CharSetNode), and default -- * matches in otherwise as well as in all matching explicit cases -- * (i.e., following an AnyCharNode or NotCharSetNode). This avoids -- * enumerating all the explicit tranitions for default matches. 
-- */ -- typedef struct NodeCases { -- typedef map::iterator iterator; -- iterator begin() { return cases.begin(); } -- iterator end() { return cases.end(); } -- -- NodeCases() : otherwise(0) { } -- map cases; -- NodeSet *otherwise; -- } NodeCases; -- -- -- /* An abstract node in the syntax tree. */ -- class Node { -- public: -- Node() : -- nullable(false) { child[0] = child[1] = 0; } -- Node(Node *left) : -- nullable(false) { child[0] = left; child[1] = 0; } -- Node(Node *left, Node *right) : -- nullable(false) { child[0] = left; child[1] = right; } -- virtual ~Node() -- { -- if (child[0]) -- child[0]->release(); -- if (child[1]) -- child[1]->release(); -- } -- -- /** -- * See the "Dragon Book" for an explanation of nullable, firstpos, -- * lastpos, and followpos. -- */ -- virtual void compute_nullable() { } -- virtual void compute_firstpos() = 0; -- virtual void compute_lastpos() = 0; -- virtual void compute_followpos() { } -- virtual int eq(Node *other) = 0; -- virtual ostream& dump(ostream& os) = 0; -- -- bool nullable; -- NodeSet firstpos, lastpos, followpos; -- /* child 0 is left, child 1 is right */ -- Node *child[2]; -- -- unsigned int label; /* unique number for debug etc */ -- /** -- * We indirectly release Nodes through a virtual function because -- * accept and Eps Nodes are shared, and must be treated specially. -- * We could use full reference counting here but the indirect release -- * is sufficient and has less overhead -- */ -- virtual void release(void) { -- delete this; -- } -- }; -- -- class InnerNode : public Node { -- public: -- InnerNode() : Node() { }; -- InnerNode(Node *left) : Node(left) {}; -- InnerNode(Node *left, Node *right) : Node(left, right) { }; -- }; -- -- class OneChildNode : public InnerNode { -- public: -- OneChildNode(Node *left) : InnerNode(left) { }; -- }; -- -- class TwoChildNode : public InnerNode { -- public: -- TwoChildNode(Node *left, Node *right) : InnerNode(left, right) { }; -- }; -- -- class LeafNode : public Node { -- public: -- LeafNode() : Node() { }; -- -- }; -- -- /* Match nothing (//). */ -- class EpsNode : public LeafNode { -- public: -- EpsNode() : LeafNode() -- { -- nullable = true; -- label = 0; -- } -- void release(void) -- { -- /* don't delete Eps nodes because there is a single static instance -- * shared by all trees. Look for epsnode in the code -- */ -- } -- -- void compute_firstpos() -- { -- } -- void compute_lastpos() -- { -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) -- return 1; -- return 0; -- } -- ostream& dump(ostream& os) -- { -- return os << "[]"; -- } -- }; -- -- /** -- * Leaf nodes in the syntax tree are important to us: they describe the -- * characters that the regular expression matches. We also consider -- * AcceptNodes import: they indicate when a regular expression matches. -- */ -- class ImportantNode : public LeafNode { -- public: -- ImportantNode() : LeafNode() { } -- void compute_firstpos() -- { -- firstpos.insert(this); -- } -- void compute_lastpos() { -- lastpos.insert(this); -- } -- virtual void follow(NodeCases& cases) = 0; -- }; -- -- /* common base class for all the different classes that contain -- * character information. -- */ -- class CNode : public ImportantNode { -- public: -- CNode() : ImportantNode() { } -- -- }; -- -- /* Match one specific character (/c/). 
*/ -- class CharNode : public CNode { -- public: -- CharNode(uchar c) : c(c) { } -- void follow(NodeCases& cases) -- { -- NodeSet **x = &cases.cases[c]; -- if (!*x) { -- if (cases.otherwise) -- *x = new NodeSet(*cases.otherwise); -- else -- *x = new NodeSet; -- } -- (*x)->insert(followpos.begin(), followpos.end()); -- } -- int eq(Node *other) { -- CharNode *o = dynamic_cast(other); -- if (o) { -- return c == o->c; -- } -- return 0; -- } -- ostream& dump(ostream& os) -- { -- return os << c; -- } -- -- uchar c; -- }; -- -- /* Match a set of characters (/[abc]/). */ -- class CharSetNode : public CNode { -- public: -- CharSetNode(Chars& chars) : chars(chars) { } -- void follow(NodeCases& cases) -- { -- for (Chars::iterator i = chars.begin(); i != chars.end(); i++) { -- NodeSet **x = &cases.cases[*i]; -- if (!*x) { -- if (cases.otherwise) -- *x = new NodeSet(*cases.otherwise); -- else -- *x = new NodeSet; -- } -- (*x)->insert(followpos.begin(), followpos.end()); -- } -- } -- int eq(Node *other) { -- CharSetNode *o = dynamic_cast(other); -- if (!o || chars.size() != o->chars.size()) -- return 0; -- -- for (Chars::iterator i = chars.begin(), j = o->chars.begin(); -- i != chars.end() && j != o->chars.end(); -- i++, j++) { -- if (*i != *j) -- return 0; -- } -- return 1; -- } -- ostream& dump(ostream& os) -- { -- os << '['; -- for (Chars::iterator i = chars.begin(); i != chars.end(); i++) -- os << *i; -- return os << ']'; -- } -- -- Chars chars; -- }; -- -- /* Match all except one character (/[^abc]/). */ -- class NotCharSetNode : public CNode { -- public: -- NotCharSetNode(Chars& chars) : chars(chars) { } -- void follow(NodeCases& cases) -- { -- if (!cases.otherwise) -- cases.otherwise = new NodeSet; -- for (Chars::iterator j = chars.begin(); j != chars.end(); j++) { -- NodeSet **x = &cases.cases[*j]; -- if (!*x) -- *x = new NodeSet(*cases.otherwise); -- } -- /** -- * Note: Add to the nonmatching characters after copying away the -- * old otherwise state for the matching characters. -- */ -- cases.otherwise->insert(followpos.begin(), followpos.end()); -- for (NodeCases::iterator i = cases.begin(); i != cases.end(); i++) { -- if (chars.find(i->first) == chars.end()) -- i->second->insert(followpos.begin(), followpos.end()); -- } -- } -- int eq(Node *other) { -- NotCharSetNode *o = dynamic_cast(other); -- if (!o || chars.size() != o->chars.size()) -- return 0; -- -- for (Chars::iterator i = chars.begin(), j = o->chars.begin(); -- i != chars.end() && j != o->chars.end(); -- i++, j++) { -- if (*i != *j) -- return 0; -- } -- return 1; -- } -- ostream& dump(ostream& os) -- { -- os << "[^"; -- for (Chars::iterator i = chars.begin(); i != chars.end(); i++) -- os << *i; -- return os << ']'; -- } -- -- Chars chars; -- }; -- -- /* Match any character (/./). */ -- class AnyCharNode : public CNode { -- public: -- AnyCharNode() { } -- void follow(NodeCases& cases) -- { -- if (!cases.otherwise) -- cases.otherwise = new NodeSet; -- cases.otherwise->insert(followpos.begin(), followpos.end()); -- for (NodeCases::iterator i = cases.begin(); i != cases.end(); i++) -- i->second->insert(followpos.begin(), followpos.end()); -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) -- return 1; -- return 0; -- } -- ostream& dump(ostream& os) { -- return os << "."; -- } -- }; -- -- /** -- * Indicate that a regular expression matches. An AcceptNode itself -- * doesn't match anything, so it will never generate any transitions. 
-- */ -- class AcceptNode : public ImportantNode { -- public: -- AcceptNode() {} -- void release(void) -- { -- /* don't delete AcceptNode via release as they are shared, -- * and will be deleted when the table the are stored in is deleted -- */ -- } -- -- void follow(NodeCases& cases __attribute__((unused))) -- { -- /* Nothing to follow. */ -- } -- /* requires accept nodes to be common by pointer */ -- int eq(Node *other) { -- if (dynamic_cast(other)) -- return (this == other); -- return 0; -- } -- }; -- -- /* Match a node zero or more times. (This is a unary operator.) */ -- class StarNode : public OneChildNode { -- public: -- StarNode(Node *left) : -- OneChildNode(left) -- { -- nullable = true; -- } -- void compute_firstpos() -- { -- firstpos = child[0]->firstpos; -- } -- void compute_lastpos() -- { -- lastpos = child[0]->lastpos; -- } -- void compute_followpos() -- { -- NodeSet from = child[0]->lastpos, to = child[0]->firstpos; -- for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -- (*i)->followpos.insert(to.begin(), to.end()); -- } -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) -- return child[0]->eq(other->child[0]); -- return 0; -- } -- ostream& dump(ostream& os) -- { -- os << '('; -- child[0]->dump(os); -- return os << ")*"; -- } -- }; -- -- /* Match a node one or more times. (This is a unary operator.) */ -- class PlusNode : public OneChildNode { -- public: -- PlusNode(Node *left) : -- OneChildNode(left) { } -- void compute_nullable() -- { -- nullable = child[0]->nullable; -- } -- void compute_firstpos() -- { -- firstpos = child[0]->firstpos; -- } -- void compute_lastpos() -- { -- lastpos = child[0]->lastpos; -- } -- void compute_followpos() -- { -- NodeSet from = child[0]->lastpos, to = child[0]->firstpos; -- for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -- (*i)->followpos.insert(to.begin(), to.end()); -- } -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) -- return child[0]->eq(other->child[0]); -- return 0; -- } -- ostream& dump(ostream& os) -- { -- os << '('; -- child[0]->dump(os); -- return os << ")+"; -- } -- }; -- -- /* Match a pair of consecutive nodes. */ -- class CatNode : public TwoChildNode { -- public: -- CatNode(Node *left, Node *right) : -- TwoChildNode(left, right) { } -- void compute_nullable() -- { -- nullable = child[0]->nullable && child[1]->nullable; -- } -- void compute_firstpos() -- { -- if (child[0]->nullable) -- firstpos = child[0]->firstpos + child[1]->firstpos; -- else -- firstpos = child[0]->firstpos; -- } -- void compute_lastpos() -- { -- if (child[1]->nullable) -- lastpos = child[0]->lastpos + child[1]->lastpos; -- else -- lastpos = child[1]->lastpos; -- } -- void compute_followpos() -- { -- NodeSet from = child[0]->lastpos, to = child[1]->firstpos; -- for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -- (*i)->followpos.insert(to.begin(), to.end()); -- } -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) { -- if (!child[0]->eq(other->child[0])) -- return 0; -- return child[1]->eq(other->child[1]); -- } -- return 0; -- } -- ostream& dump(ostream& os) -- { -- child[0]->dump(os); -- child[1]->dump(os); -- return os; -- //return os << ' '; -- } -- }; -- -- /* Match one of two alternative nodes. 
*/ -- class AltNode : public TwoChildNode { -- public: -- AltNode(Node *left, Node *right) : -- TwoChildNode(left, right) { } -- void compute_nullable() -- { -- nullable = child[0]->nullable || child[1]->nullable; -- } -- void compute_lastpos() -- { -- lastpos = child[0]->lastpos + child[1]->lastpos; -- } -- void compute_firstpos() -- { -- firstpos = child[0]->firstpos + child[1]->firstpos; -- } -- int eq(Node *other) { -- if (dynamic_cast(other)) { -- if (!child[0]->eq(other->child[0])) -- return 0; -- return child[1]->eq(other->child[1]); -- } -- return 0; -- } -- ostream& dump(ostream& os) -- { -- os << '('; -- child[0]->dump(os); -- os << '|'; -- child[1]->dump(os); -- os << ')'; -- return os; -- } -- }; -- --/* Use a single static EpsNode as it carries no node specific information */ --static EpsNode epsnode; -- --/* -- * Normalize the regex parse tree for factoring and cancelations. Normalization -- * reorganizes internal (alt and cat) nodes into a fixed "normalized" form that -- * simplifies factoring code, in that it produces a canonicalized form for -- * the direction being normalized so that the factoring code does not have -- * to consider as many cases. -- * -- * left normalization (dir == 0) uses these rules -- * (E | a) -> (a | E) -- * (a | b) | c -> a | (b | c) -- * (ab)c -> a(bc) -- * -- * right normalization (dir == 1) uses the same rules but reversed -- * (a | E) -> (E | a) -- * a | (b | c) -> (a | b) | c -- * a(bc) -> (ab)c -- * -- * Note: This is written iteratively for a given node (the top node stays -- * fixed and the children are rotated) instead of recursively. -- * For a given node under examination rotate over nodes from -- * dir to !dir. Until no dir direction node meets the criterial. -- * Then recurse to the children (which will have a different node type) -- * to make sure they are normalized. -- * Normalization of a child node is guarenteed to not affect the -- * normalization of the parent. -- * -- * For cat nodes the depth first traverse order is guarenteed to be -- * maintained. This is not necessary for altnodes. -- * -- * Eg. For left normalization -- * -- * |1 |1 -- * / \ / \ -- * |2 T -> a |2 -- * / \ / \ -- * |3 c b |3 -- * / \ / \ -- * a b c T -- * -- */ --static void rotate_node(Node *t, int dir) { -- // (a | b) | c -> a | (b | c) -- // (ab)c -> a(bc) -- Node *left = t->child[dir]; -- t->child[dir] = left->child[dir]; -- left->child[dir] = left->child[!dir]; -- left->child[!dir] = t->child[!dir]; -- t->child[!dir] = left; --} -- --void normalize_tree(Node *t, int dir) --{ -- if (dynamic_cast(t)) -- return; -- -- for (;;) { -- if ((&epsnode == t->child[dir]) && -- (&epsnode != t->child[!dir]) && -- dynamic_cast(t)) { -- // (E | a) -> (a | E) -- // Ea -> aE -- Node *c = t->child[dir]; -- t->child[dir] = t->child[!dir]; -- t->child[!dir] = c; -- // Don't break here as 'a' may be a tree that -- // can be pulled up. 
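As a worked example of the left-normalization rules described above (added here for clarity; it is not part of the original patch): applying the (ab)c -> a(bc) rotation repeatedly to the catenation tree ((ab)c)d gives

    ((ab)c)d  ->  (ab)(cd)  ->  a(b(cd))

after which no cat node has a cat node as its left child, and the depth-first leaf order a, b, c, d is unchanged, matching the guarantee stated in the comment above.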
-- } else if ((dynamic_cast(t) && -- dynamic_cast(t->child[dir])) || -- (dynamic_cast(t) && -- dynamic_cast(t->child[dir]))) { -- // (a | b) | c -> a | (b | c) -- // (ab)c -> a(bc) -- rotate_node(t, dir); -- } else if (dynamic_cast(t) && -- dynamic_cast(t->child[dir]) && -- dynamic_cast(t->child[!dir])) { -- // [a] | b -> b | [a] -- Node *c = t->child[dir]; -- t->child[dir] = t->child[!dir]; -- t->child[!dir] = c; -- } else { -- break; -- } -- } -- if (t->child[dir]) -- normalize_tree(t->child[dir], dir); -- if (t->child[!dir]) -- normalize_tree(t->child[!dir], dir); --} -- --//charset conversion is disabled for now, --//it hinders tree optimization in some cases, so it need to be either --//done post optimization, or have extra factoring rules added --#if 0 --static Node *merge_charset(Node *a, Node *b) --{ -- if (dynamic_cast(a) && -- dynamic_cast(b)) { -- Chars chars; -- chars.insert(dynamic_cast(a)->c); -- chars.insert(dynamic_cast(b)->c); -- CharSetNode *n = new CharSetNode(chars); -- return n; -- } else if (dynamic_cast(a) && -- dynamic_cast(b)) { -- Chars *chars = &dynamic_cast(b)->chars; -- chars->insert(dynamic_cast(a)->c); -- return b; -- } else if (dynamic_cast(a) && -- dynamic_cast(b)) { -- Chars *from = &dynamic_cast(a)->chars; -- Chars *to = &dynamic_cast(b)->chars; -- for (Chars::iterator i = from->begin(); i != from->end(); i++) -- to->insert(*i); -- return b; -- } -- -- //return ???; --} -- --static Node *alt_to_charsets(Node *t, int dir) --{ --/* -- Node *first = NULL; -- Node *p = t; -- Node *i = t; -- for (;dynamic_cast(i);) { -- if (dynamic_cast(i->child[dir]) || -- dynamic_cast(i->child[dir])) { -- if (!first) { -- first = i; -- p = i; -- i = i->child[!dir]; -- } else { -- first->child[dir] = merge_charset(first->child[dir], -- i->child[dir]); -- p->child[!dir] = i->child[!dir]; -- Node *tmp = i; -- i = tmp->child[!dir]; -- tmp->child[!dir] = NULL; -- tmp->release(); -- } -- } else { -- p = i; -- i = i->child[!dir]; -- } -- } -- // last altnode of chain check other dir as well -- if (first && (dynamic_cast(i) || -- dynamic_cast(i))) { -- -- } --*/ -- --/* -- if (dynamic_cast(t->child[dir]) || -- dynamic_cast(t->child[dir])) -- char_test = true; -- (char_test && -- (dynamic_cast(i->child[dir]) || -- dynamic_cast(i->child[dir])))) { --*/ -- return t; --} --#endif -- --static Node *basic_alt_factor(Node *t, int dir) --{ -- if (!dynamic_cast(t)) -- return t; -- -- if (t->child[dir]->eq(t->child[!dir])) { -- // (a | a) -> a -- Node *tmp = t->child[dir]; -- t->child[dir] = NULL; -- t->release(); -- return tmp; -- } -- -- // (ab) | (ac) -> a(b|c) -- if (dynamic_cast(t->child[dir]) && -- dynamic_cast(t->child[!dir]) && -- t->child[dir]->child[dir]->eq(t->child[!dir]->child[dir])) { -- // (ab) | (ac) -> a(b|c) -- Node *left = t->child[dir]; -- Node *right = t->child[!dir]; -- t->child[dir] = left->child[!dir]; -- t->child[!dir] = right->child[!dir]; -- right->child[!dir] = NULL; -- right->release(); -- left->child[!dir] = t; -- return left; -- } -- -- // a | (ab) -> a (E | b) -> a (b | E) -- if (dynamic_cast(t->child[!dir]) && -- t->child[dir]->eq(t->child[!dir]->child[dir])) { -- Node *c = t->child[!dir]; -- t->child[dir]->release(); -- t->child[dir] = c->child[!dir]; -- t->child[!dir] = &epsnode; -- c->child[!dir] = t; -- return c; -- } -- -- // ab | (a) -> a (b | E) -- if (dynamic_cast(t->child[dir]) && -- t->child[dir]->child[dir]->eq(t->child[!dir])) { -- Node *c = t->child[dir]; -- t->child[!dir]->release(); -- t->child[dir] = c->child[!dir]; -- t->child[!dir] = 
&epsnode; -- c->child[!dir] = t; -- return c; -- } -- -- return t; --} -- --static Node *basic_simplify(Node *t, int dir) --{ -- if (dynamic_cast(t) && -- &epsnode == t->child[!dir]) { -- // aE -> a -- Node *tmp = t->child[dir]; -- t->child[dir] = NULL; -- t->release(); -- return tmp; -- } -- -- return basic_alt_factor(t, dir); --} -- --/* -- * assumes a normalized tree. reductions shown for left normalization -- * aE -> a -- * (a | a) -> a -- ** factoring patterns -- * a | (a | b) -> (a | b) -- * a | (ab) -> a (E | b) -> a (b | E) -- * (ab) | (ac) -> a(b|c) -- * -- * returns t - if no simplifications were made -- * a new root node - if simplifications were made -- */ --Node *simplify_tree_base(Node *t, int dir, bool &mod) --{ -- if (dynamic_cast(t)) -- return t; -- -- for (int i=0; i < 2; i++) { -- if (t->child[i]) { -- Node *c = simplify_tree_base(t->child[i], dir, mod); -- if (c != t->child[i]) { -- t->child[i] = c; -- mod = true; -- } -- } -- } -- -- // only iterate on loop if modification made -- for (;; mod = true) { -- -- Node *tmp = basic_simplify(t, dir); -- if (tmp != t) { -- t = tmp; -- continue; -- } -- -- -- /* all tests after this must meet 2 alt node condition */ -- if (!dynamic_cast(t) || -- !dynamic_cast(t->child[!dir])) -- break; -- -- // a | (a | b) -> (a | b) -- // a | (b | (c | a)) -> (b | (c | a)) -- Node *p = t; -- Node *i = t->child[!dir]; -- for (;dynamic_cast(i); p = i, i = i->child[!dir]) { -- if (t->child[dir]->eq(i->child[dir])) { -- Node *tmp = t->child[!dir]; -- t->child[!dir] = NULL; -- t->release(); -- t = tmp; -- continue; -- } -- } -- // last altnode of chain check other dir as well -- if (t->child[dir]->eq(p->child[!dir])) { -- Node *tmp = t->child[!dir]; -- t->child[!dir] = NULL; -- t->release(); -- t = tmp; -- continue; -- } -- -- //exact match didn't work, try factoring front -- //a | (ac | (ad | () -> (a (E | c)) | (...) -- //ab | (ac | (...)) -> (a (b | c)) | (...) -- //ab | (a | (...)) -> (a (b | E)) | (...) 
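A concrete illustration of this front-factoring step (a clarifying sketch, not part of the original patch): an alternation chain whose alternatives share the leading node a, for example

    (ab) | ((ac) | (ad))

is collapsed by extracting each matching alternative and re-running basic_simplify(), yielding a tree equivalent to

    a(b | c | d)

so the shared prefix a appears once instead of three times, which keeps the tree smaller before DFA construction.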
-- Node *pp; -- int count = 0; -- Node *subject = t->child[dir]; -- Node *a = subject; -- if (dynamic_cast(subject)) -- a = subject->child[dir]; -- -- for (pp = p = t, i = t->child[!dir]; -- dynamic_cast(i); ) { -- if ((dynamic_cast(i->child[dir]) && -- a->eq(i->child[dir]->child[dir])) || -- (a->eq(i->child[dir]))) { -- // extract matching alt node -- p->child[!dir] = i->child[!dir]; -- i->child[!dir] = subject; -- subject = basic_simplify(i, dir); -- if (dynamic_cast(subject)) -- a = subject->child[dir]; -- else -- a = subject; -- -- i = p->child[!dir]; -- count++; -- } else { -- pp = p; p = i; i = i->child[!dir]; -- } -- } -- -- // last altnode in chain check other dir as well -- if ((dynamic_cast(i) && -- a->eq(i->child[dir])) || -- (a->eq(i))) { -- count++; -- if (t == p) { -- t->child[dir] = subject; -- t = basic_simplify(t, dir); -- } else { -- t->child[dir] = p->child[dir]; -- p->child[dir] = subject; -- pp->child[!dir] = basic_simplify(p, dir); -- } -- } else { -- t->child[dir] = i; -- p->child[!dir] = subject; -- } -- -- if (count == 0) -- break; -- } -- return t; --} -- --int debug_tree(Node *t) --{ -- int nodes = 1; -- -- if (!dynamic_cast(t)) { -- if (t->child[0]) -- nodes += debug_tree(t->child[0]); -- if (t->child[1]) -- nodes += debug_tree(t->child[1]); -- } -- return nodes; --} -- --struct node_counts { -- int charnode; -- int charset; -- int notcharset; -- int alt; -- int plus; -- int star; -- int any; -- int cat; --}; -- -- --static void count_tree_nodes(Node *t, struct node_counts *counts) --{ -- if (dynamic_cast(t)) { -- counts->alt++; -- count_tree_nodes(t->child[0], counts); -- count_tree_nodes(t->child[1], counts); -- } else if (dynamic_cast(t)) { -- counts->cat++; -- count_tree_nodes(t->child[0], counts); -- count_tree_nodes(t->child[1], counts); -- } else if (dynamic_cast(t)) { -- counts->plus++; -- count_tree_nodes(t->child[0], counts); -- } else if (dynamic_cast(t)) { -- counts->star++; -- count_tree_nodes(t->child[0], counts); -- } else if (dynamic_cast(t)) { -- counts->charnode++; -- } else if (dynamic_cast(t)) { -- counts->any++; -- } else if (dynamic_cast(t)) { -- counts->charset++; -- } else if (dynamic_cast(t)) { -- counts->notcharset++; -- } --} -- --#include "stdio.h" --#include "stdint.h" --#include "apparmor_re.h" -- --Node *simplify_tree(Node *t, dfaflags_t flags) --{ -- bool update; -- -- if (flags & DFA_DUMP_TREE_STATS) { -- struct node_counts counts = { 0, 0, 0, 0, 0, 0, 0, 0 }; -- count_tree_nodes(t, &counts); -- fprintf(stderr, "expr tree: c %d, [] %d, [^] %d, | %d, + %d, * %d, . %d, cat %d\n", counts.charnode, counts.charset, counts.notcharset, counts.alt, counts.plus, counts.star, counts.any, counts.cat); -- } -- do { -- update = false; -- //default to right normalize first as this reduces the number -- //of trailing nodes which might follow an internal * -- //or **, which is where state explosion can happen -- //eg. 
in one test this makes the difference between -- // the dfa having about 7 thousands states, -- // and it having about 1.25 million states -- int dir = 1; -- if (flags & DFA_CONTROL_TREE_LEFT) -- dir = 0; -- for (int count = 0; count < 2; count++) { -- bool modified; -- do { -- modified = false; -- if (flags & DFA_CONTROL_TREE_NORMAL) -- normalize_tree(t, dir); -- t = simplify_tree_base(t, dir, modified); -- if (modified) -- update = true; -- } while (modified); -- if (flags & DFA_CONTROL_TREE_LEFT) -- dir++; -- else -- dir--; -- } -- } while(update); -- if (flags & DFA_DUMP_TREE_STATS) { -- struct node_counts counts = { 0, 0, 0, 0, 0, 0, 0, 0 }; -- count_tree_nodes(t, &counts); -- fprintf(stderr, "simplified expr tree: c %d, [] %d, [^] %d, | %d, + %d, * %d, . %d, cat %d\n", counts.charnode, counts.charset, counts.notcharset, counts.alt, counts.plus, counts.star, counts.any, counts.cat); -- } -- return t; --} -- -- --%} -- --%union { -- char c; -- Node *node; -- Chars *cset; --} -- --%{ -- void regexp_error(Node **, const char *, const char *); --# define YYLEX_PARAM &text -- int regexp_lex(YYSTYPE *, const char **); -- -- static inline Chars* -- insert_char(Chars* cset, uchar a) -- { -- cset->insert(a); -- return cset; -- } -- -- static inline Chars* -- insert_char_range(Chars* cset, uchar a, uchar b) -- { -- if (a > b) -- swap(a, b); -- for (uchar i = a; i <= b; i++) -- cset->insert(i); -- return cset; -- } --%} -- --%pure-parser --/* %error-verbose */ --%parse-param {Node **root} --%parse-param {const char *text} --%name-prefix = "regexp_" -- --%token CHAR --%type regex_char cset_char1 cset_char cset_charN --%type charset cset_chars --%type regexp expr terms0 terms qterm term -- --/** -- * Note: destroy all nodes upon failure, but *not* the start symbol once -- * parsing succeeds! -- */ --%destructor { $$->release(); } expr terms0 terms qterm term -- --%% -- --/* FIXME: Does not parse "[--]", "[---]", "[^^-x]". I don't actually know -- which precise grammer Perl regexps use, and rediscovering that -- is proving to be painful. */ -- --regexp : /* empty */ { *root = $$ = &epsnode; } -- | expr { *root = $$ = $1; } -- ; -- --expr : terms -- | expr '|' terms0 { $$ = new AltNode($1, $3); } -- | '|' terms0 { $$ = new AltNode(&epsnode, $2); } -- ; -- --terms0 : /* empty */ { $$ = &epsnode; } -- | terms -- ; -- --terms : qterm -- | terms qterm { $$ = new CatNode($1, $2); } -- ; -- --qterm : term -- | term '*' { $$ = new StarNode($1); } -- | term '+' { $$ = new PlusNode($1); } -- ; -- --term : '.' 
{ $$ = new AnyCharNode; } -- | regex_char { $$ = new CharNode($1); } -- | '[' charset ']' { $$ = new CharSetNode(*$2); -- delete $2; } -- | '[' '^' charset ']' -- { $$ = new NotCharSetNode(*$3); -- delete $3; } -- | '[' '^' '^' cset_chars ']' -- { $4->insert('^'); -- $$ = new NotCharSetNode(*$4); -- delete $4; } -- | '(' regexp ')' { $$ = $2; } -- ; -- --regex_char : CHAR -- | '^' { $$ = '^'; } -- | '-' { $$ = '-'; } -- | ']' { $$ = ']'; } -- ; -- --charset : cset_char1 cset_chars -- { $$ = insert_char($2, $1); } -- | cset_char1 '-' cset_charN cset_chars -- { $$ = insert_char_range($4, $1, $3); } -- ; -- --cset_chars : /* nothing */ { $$ = new Chars; } -- | cset_chars cset_charN -- { $$ = insert_char($1, $2); } -- | cset_chars cset_charN '-' cset_charN -- { $$ = insert_char_range($1, $2, $4); } -- ; -- --cset_char1 : cset_char -- | ']' { $$ = ']'; } -- | '-' { $$ = '-'; } -- ; -- --cset_charN : cset_char -- | '^' { $$ = '^'; } -- ; -- --cset_char : CHAR -- | '[' { $$ = '['; } -- | '*' { $$ = '*'; } -- | '+' { $$ = '+'; } -- | '.' { $$ = '.'; } -- | '|' { $$ = '|'; } -- | '(' { $$ = '('; } -- | ')' { $$ = ')'; } -- ; -- --%% -- --#include --#include --#include --#include -- --#include --#include -- --#include "../immunix.h" -- --/* Traverse the syntax tree depth-first in an iterator-like manner. */ --class depth_first_traversal { -- stack pos; -- void push_left(Node *node) -- { -- pos.push(node); -- -- while (dynamic_cast(node)) { -- pos.push(node->child[0]); -- node = node->child[0]; -- } -- } -- --public: -- depth_first_traversal(Node *node) { -- push_left(node); -- } -- Node *operator*() -- { -- return pos.top(); -- } -- Node* operator->() -- { -- return pos.top(); -- } -- operator bool() -- { -- return !pos.empty(); -- } -- void operator++(int) -- { -- Node *last = pos.top(); -- pos.pop(); -- -- if (!pos.empty()) { -- /* no need to dynamic cast, as we just popped a node so the top node -- * must be an inner node */ -- InnerNode *node = (InnerNode *)(pos.top()); -- -- if (node->child[1] && node->child[1] != last) { -- push_left(node->child[1]); -- } -- } -- } --}; -- --ostream& operator<<(ostream& os, Node& node) --{ -- node.dump(os); -- return os; --} -- --ostream& operator<<(ostream& os, uchar c) --{ -- const char *search = "\a\033\f\n\r\t|*+[](). ", -- *replace = "aefnrt|*+[](). 
", *s; -- -- if ((s = strchr(search, c)) && *s != '\0') -- os << '\\' << replace[s - search]; -- else if (c < 32 || c >= 127) -- os << '\\' << '0' << char('0' + (c >> 6)) -- << char('0' + ((c >> 3) & 7)) << char('0' + (c & 7)); -- else -- os << (char)c; -- return os; --} -- --int --octdigit(char c) --{ -- if (c >= '0' && c <= '7') -- return c - '0'; -- return -1; --} -- --int --hexdigit(char c) --{ -- if (c >= '0' && c <= '9') -- return c - '0'; -- else if (c >= 'A' && c <= 'F') -- return 10 + c - 'A'; -- else if (c >= 'a' && c <= 'f') -- return 10 + c - 'A'; -- else -- return -1; --} -- --int --regexp_lex(YYSTYPE *val, const char **pos) --{ -- int c; -- -- val->c = **pos; -- switch(*(*pos)++) { -- case '\0': -- (*pos)--; -- return 0; -- -- case '*': case '+': case '.': case '|': case '^': case '-': -- case '[': case ']': case '(' : case ')': -- return *(*pos - 1); -- -- case '\\': -- val->c = **pos; -- switch(*(*pos)++) { -- case '\0': -- (*pos)--; -- /* fall through */ -- case '\\': -- val->c = '\\'; -- break; -- -- case '0': -- val->c = 0; -- if ((c = octdigit(**pos)) >= 0) { -- val->c = c; -- (*pos)++; -- } -- if ((c = octdigit(**pos)) >= 0) { -- val->c = (val->c << 3) + c; -- (*pos)++; -- } -- if ((c = octdigit(**pos)) >= 0) { -- val->c = (val->c << 3) + c; -- (*pos)++; -- } -- break; -- -- case 'x': -- val->c = 0; -- if ((c = hexdigit(**pos)) >= 0) { -- val->c = c; -- (*pos)++; -- } -- if ((c = hexdigit(**pos)) >= 0) { -- val->c = (val->c << 4) + c; -- (*pos)++; -- } -- break; -- -- case 'a': -- val->c = '\a'; -- break; -- -- case 'e': -- val->c = 033 /* ESC */; -- break; -- -- case 'f': -- val->c = '\f'; -- break; -- -- case 'n': -- val->c = '\n'; -- break; -- -- case 'r': -- val->c = '\r'; -- break; -- -- case 't': -- val->c = '\t'; -- break; -- } -- } -- return CHAR; --} -- --void --regexp_error(Node ** __attribute__((unused)), -- const char *text __attribute__((unused)), -- const char *error __attribute__((unused))) --{ -- /* We don't want the library to print error messages. */ --} -- --/** -- * Assign a consecutive number to each node. This is only needed for -- * pretty-printing the debug output. -- * -- * The epsnode is labeled 0. Start labeling at 1 -- */ --void label_nodes(Node *root) --{ -- int nodes = 1; -- for (depth_first_traversal i(root); i; i++) -- i->label = nodes++; --} -- --/** -- * Text-dump a state (for debugging). -- */ --ostream& operator<<(ostream& os, const NodeSet& state) --{ -- os << '{'; -- if (!state.empty()) { -- NodeSet::iterator i = state.begin(); -- for(;;) { -- os << (*i)->label; -- if (++i == state.end()) -- break; -- os << ','; -- } -- } -- os << '}'; -- return os; --} -- --/** -- * Text-dump the syntax tree (for debugging). -- */ --void dump_syntax_tree(ostream& os, Node *node) { -- for (depth_first_traversal i(node); i; i++) { -- os << i->label << '\t'; -- if ((*i)->child[0] == 0) -- os << **i << '\t' << (*i)->followpos << endl; -- else { -- if ((*i)->child[1] == 0) -- os << (*i)->child[0]->label << **i; -- else -- os << (*i)->child[0]->label << **i -- << (*i)->child[1]->label; -- os << '\t' << (*i)->firstpos -- << (*i)->lastpos << endl; -- } -- } -- os << endl; --} -- --/* Comparison operator for sets of . -- * Compare set hashes, and if the sets have the same hash -- * do compare pointer comparison on set of , the pointer comparison -- * allows us to determine which Sets of we have seen already from -- * new ones when constructing the DFA. 
-- */ --struct deref_less_than { -- bool operator()(pair const & lhs, pair const & rhs) const -- { -- if (lhs.first == rhs.first) -- return *(lhs.second) < *(rhs.second); -- else -- return lhs.first < rhs.first; -- } --}; -- --unsigned long hash_NodeSet(const NodeSet *ns) --{ -- unsigned long hash = 5381; -- -- for (NodeSet::iterator i = ns->begin(); i != ns->end(); i++) { -- hash = ((hash << 5) + hash) + (unsigned long) *i; -- } -- -- return hash; --} -- --class State; --/** -- * State cases are identical to NodesCases except they map to State * -- * instead of NodeSet. -- * Out-edges from a state to another: we store the follow State -- * for each input character that is not a default match in cases and -- * default matches in otherwise as well as in all matching explicit cases -- * This avoids enumerating all the explicit tranitions for default matches. -- */ --typedef struct Cases { -- typedef map::iterator iterator; -- iterator begin() { return cases.begin(); } -- iterator end() { return cases.end(); } -- -- Cases() : otherwise(0) { } -- map cases; -- State *otherwise; --} Cases; -- --typedef list Partition; -- --uint32_t accept_perms(NodeSet *state, uint32_t *audit_ctl, int *error); -- --/* -- * State - DFA individual state information -- * label: a unique label to identify the state used for pretty printing -- * the non-matching state is setup to have label == 0 and -- * the start state is setup to have label == 1 -- * audit: the audit permission mask for the state -- * accept: the accept permissions for the state -- * cases: set of transitions from this state -- * parition: Is a temporary work variable used during dfa minimization. -- * it can be replaced with a map, but that is slower and uses more -- * memory. -- * nodes: Is a temporary work variable used during dfa creation. It can -- * be replaced by using the nodemap, but that is slower -- */ --class State { --public: -- State() : label (0), audit(0), accept(0), cases(), nodes(NULL) { }; -- State(int l): label (l), audit(0), accept(0), cases(), nodes(NULL) { }; -- State(int l, NodeSet *n) throw (int): -- label(l), audit(0), accept(0), cases(), nodes(n) -- { -- int error; -- -- /* Compute permissions associated with the State. */ -- accept = accept_perms(nodes, &audit, &error); -- if (error) { --cerr << "Failing on accept perms " << error << "\n"; -- throw error; -- } -- }; -- -- int label; -- uint32_t audit, accept; -- Cases cases; -- union { -- Partition *partition; -- NodeSet *nodes; -- }; --}; -- --ostream& operator<<(ostream& os, const State& state) --{ -- /* dump the state label */ -- os << '{'; -- os << state.label; -- os << '}'; -- return os; --} -- --typedef map, State *, deref_less_than > NodeMap; --/* Transitions in the DFA. */ -- --/* dfa_stats - structure to group various stats about dfa creation -- * duplicates - how many duplicate NodeSets where encountered and discarded -- * proto_max - maximum length of a NodeSet encountered during dfa construction -- * proto_sum - sum of NodeSet length during dfa construction. Used to find -- * average length. 
-- */ --typedef struct dfa_stats { -- unsigned int duplicates, proto_max, proto_sum; --} dfa_stats_t; -- --class DFA { -- void dump_node_to_dfa(void); -- State* add_new_state(NodeMap &nodemap, pair index, NodeSet *nodes, dfa_stats_t &stats); -- void update_state_transitions(NodeMap &nodemap, list &work_queue, State *state, dfa_stats_t &stats); -- State *find_target_state(NodeMap &nodemap, list &work_queue, -- NodeSet *nodes, dfa_stats_t &stats); --public: -- DFA(Node *root, dfaflags_t flags); -- virtual ~DFA(); -- void remove_unreachable(dfaflags_t flags); -- bool same_mappings(State *s1, State *s2); -- size_t hash_trans(State *s); -- void minimize(dfaflags_t flags); -- void dump(ostream& os); -- void dump_dot_graph(ostream& os); -- void dump_uniq_perms(const char *s); -- map equivalence_classes(dfaflags_t flags); -- void apply_equivalence_classes(map& eq); -- Node *root; -- State *nonmatching, *start; -- Partition states; --}; -- --State* DFA::add_new_state(NodeMap &nodemap, pair index, NodeSet *nodes, dfa_stats_t &stats) --{ -- State *state = new State(nodemap.size(), nodes); -- states.push_back(state); -- nodemap.insert(make_pair(index, state)); -- stats.proto_sum += nodes->size(); -- if (nodes->size() > stats.proto_max) -- stats.proto_max = nodes->size(); -- return state; --} -- --State *DFA::find_target_state(NodeMap &nodemap, list &work_queue, -- NodeSet *nodes, dfa_stats_t &stats) --{ -- State *target; -- -- pair index = make_pair(hash_NodeSet(nodes), nodes); -- -- map, State *, deref_less_than>::iterator x = nodemap.find(index); -- -- if (x == nodemap.end()) { -- /* set of nodes isn't known so create new state, and nodes to -- * state mapping -- */ -- target = add_new_state(nodemap, index, nodes, stats); -- work_queue.push_back(target); -- } else { -- /* set of nodes already has a mapping so free this one */ -- stats.duplicates++; -- delete (nodes); -- target = x->second; -- } -- -- return target; --} -- --void DFA::update_state_transitions(NodeMap &nodemap, -- list &work_queue, State *state, -- dfa_stats_t &stats) --{ -- /* Compute possible transitions for state->nodes. This is done by -- * iterating over all the nodes in state->nodes and combining the -- * transitions. -- * -- * The resultant transition set is a mapping of characters to -- * sets of nodes. -- */ -- NodeCases cases; -- for (NodeSet::iterator i = state->nodes->begin(); i != state->nodes->end(); i++) -- (*i)->follow(cases); -- -- /* Now for each set of nodes in the computed transitions, make -- * sure that there is a state that maps to it, and add the -- * matching case to the state. -- */ -- -- /* check the default transition first */ -- if (cases.otherwise) -- state->cases.otherwise = find_target_state(nodemap, work_queue, -- cases.otherwise, -- stats);; -- -- /* For each transition from *from, check if the set of nodes it -- * transitions to already has been mapped to a state -- */ -- for (NodeCases::iterator j = cases.begin(); j != cases.end(); j++) { -- State *target; -- target = find_target_state(nodemap, work_queue, j->second, -- stats); -- -- /* Don't insert transition that the default transition -- * already covers -- */ -- if (target != state->cases.otherwise) -- state->cases.cases[j->first] = target; -- } --} -- -- --/* WARNING: This routine can only be called from within DFA creation as -- * the nodes value is only valid during dfa construction. 
-- */ --void DFA::dump_node_to_dfa(void) --{ -- cerr << "Mapping of States to expr nodes\n" -- " State <= Nodes\n" -- "-------------------\n"; -- for (Partition::iterator i = states.begin(); i != states.end(); i++) -- cerr << " " << (*i)->label << " <= " << *(*i)->nodes << "\n"; --} -- --/** -- * Construct a DFA from a syntax tree. -- */ --DFA::DFA(Node *root, dfaflags_t flags) : root(root) --{ -- dfa_stats_t stats = { 0, 0, 0 }; -- int i = 0; -- -- if (flags & DFA_DUMP_PROGRESS) -- fprintf(stderr, "Creating dfa:\r"); -- -- for (depth_first_traversal i(root); i; i++) { -- (*i)->compute_nullable(); -- (*i)->compute_firstpos(); -- (*i)->compute_lastpos(); -- } -- -- if (flags & DFA_DUMP_PROGRESS) -- fprintf(stderr, "Creating dfa: followpos\r"); -- for (depth_first_traversal i(root); i; i++) { -- (*i)->compute_followpos(); -- } -- -- NodeMap nodemap; -- NodeSet *emptynode = new NodeSet; -- nonmatching = add_new_state(nodemap, -- make_pair(hash_NodeSet(emptynode), emptynode), -- emptynode, stats); -- -- NodeSet *first = new NodeSet(root->firstpos); -- start = add_new_state(nodemap, make_pair(hash_NodeSet(first), first), -- first, stats); -- -- /* the work_queue contains the states that need to have their -- * transitions computed. This could be done with a recursive -- * algorithm instead of a work_queue, but it would be slightly slower -- * and consume more memory. -- * -- * TODO: currently the work_queue is treated in a breadth first -- * search manner. Test using the work_queue in a depth first -- * manner, this may help reduce the number of entries on the -- * work_queue at any given time, thus reducing peak memory use. -- */ -- list work_queue; -- work_queue.push_back(start); -- -- while (!work_queue.empty()) { -- if (i % 1000 == 0 && (flags & DFA_DUMP_PROGRESS)) -- fprintf(stderr, "\033[2KCreating dfa: queue %ld\tstates %ld\teliminated duplicates %d\r", work_queue.size(), states.size(), stats.duplicates); -- i++; -- -- State *from = work_queue.front(); -- work_queue.pop_front(); -- -- /* Update 'from's transitions, and if it transitions to any -- * unknown State create it and add it to the work_queue -- */ -- update_state_transitions(nodemap, work_queue, from, stats); -- -- } /* for (NodeSet *nodes ... */ -- -- /* cleanup Sets of nodes used computing the DFA as they are no longer -- * needed. 
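As a concrete illustration of the nullable/firstpos/lastpos/followpos computation above, consider the tree for /ab*/, i.e. cat(a, star(b)), with position 1 = 'a' and position 2 = 'b' (a hand-worked example, not output of the code):

        nullable:  a = false, b = false, b* = true, cat = false
        firstpos:  firstpos(cat) = firstpos(a) = {1}          (left child not nullable)
        lastpos:   lastpos(cat)  = {1} + {2}  = {1, 2}        (right child nullable)
        followpos: cat rule  adds firstpos(b*) = {2} to followpos(1)
                   star rule adds firstpos(b*) = {2} to followpos(2)

The start state is firstpos(root) = {1}; reading 'a' leads to {2}, and reading 'b' from {2} loops back to {2}, which is exactly the transition skeleton of the DFA for /ab*/.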
-- */ -- for (depth_first_traversal i(root); i; i++) { -- (*i)->firstpos.clear(); -- (*i)->lastpos.clear(); -- (*i)->followpos.clear(); -- } -- -- if (flags & DFA_DUMP_NODE_TO_DFA) -- dump_node_to_dfa(); -- -- for (NodeMap::iterator i = nodemap.begin(); i != nodemap.end(); i++) -- delete i->first.second; -- nodemap.clear(); -- -- if (flags & (DFA_DUMP_STATS)) -- fprintf(stderr, "\033[2KCreated dfa: states %ld,\teliminated duplicates %d,\tprotostate sets: longest %u, avg %u\n", states.size(), stats.duplicates, stats.proto_max, (unsigned int) (stats.proto_sum/states.size())); -- --} -- -- --DFA::~DFA() --{ -- for (Partition::iterator i = states.begin(); i != states.end(); i++) -- delete *i; --} -- --class MatchFlag : public AcceptNode { --public: --MatchFlag(uint32_t flag, uint32_t audit) : flag(flag), audit(audit) {} -- ostream& dump(ostream& os) -- { -- return os << '<' << flag << '>'; -- } -- -- uint32_t flag; -- uint32_t audit; -- }; -- --class ExactMatchFlag : public MatchFlag { --public: -- ExactMatchFlag(uint32_t flag, uint32_t audit) : MatchFlag(flag, audit) {} --}; -- --class DenyMatchFlag : public MatchFlag { --public: -- DenyMatchFlag(uint32_t flag, uint32_t quiet) : MatchFlag(flag, quiet) {} --}; -- -- --void DFA::dump_uniq_perms(const char *s) --{ -- set < pair > uniq; -- for (Partition::iterator i = states.begin(); i != states.end(); i++) -- uniq.insert(make_pair((*i)->accept, (*i)->audit)); -- -- cerr << "Unique Permission sets: " << s << " (" << uniq.size() << ")\n"; -- cerr << "----------------------\n"; -- for (set< pair >::iterator i = uniq.begin(); -- i != uniq.end(); i++) { -- cerr << " " << hex << i->first << " " << i->second << dec <<"\n"; -- } --} -- -- --/* Remove dead or unreachable states */ --void DFA::remove_unreachable(dfaflags_t flags) --{ -- set reachable; -- list work_queue; -- -- /* find the set of reachable states */ -- reachable.insert(nonmatching); -- work_queue.push_back(start); -- while (!work_queue.empty()) { -- State *from = work_queue.front(); -- work_queue.pop_front(); -- reachable.insert(from); -- -- if (from->cases.otherwise && -- (reachable.find(from->cases.otherwise) == reachable.end())) -- work_queue.push_back(from->cases.otherwise); -- -- for (Cases::iterator j = from->cases.begin(); -- j != from->cases.end(); j++) { -- if (reachable.find(j->second) == reachable.end()) -- work_queue.push_back(j->second); -- } -- } -- -- /* walk the set of states and remove any that aren't reachable */ -- if (reachable.size() < states.size()) { -- int count = 0; -- Partition::iterator i; -- Partition::iterator next; -- for (i = states.begin(); i != states.end(); i = next) { -- next = i; -- next++; -- if (reachable.find(*i) == reachable.end()) { -- if (flags & DFA_DUMP_UNREACHABLE) { -- cerr << "unreachable: "<< **i; -- if (*i == start) -- cerr << " <=="; -- if ((*i)->accept) { -- cerr << " (0x" << hex << (*i)->accept -- << " " << (*i)->audit << dec << ')'; -- } -- cerr << endl; -- } -- State *current = *i; -- states.erase(i); -- delete(current); -- count++; -- } -- } -- -- if (count && (flags & DFA_DUMP_STATS)) -- cerr << "DFA: states " << states.size() << " removed " -- << count << " unreachable states\n"; -- } --} -- --/* test if two states have the same transitions under partition_map */ --bool DFA::same_mappings(State *s1, State *s2) --{ -- if (s1->cases.otherwise && s1->cases.otherwise != nonmatching) { -- if (!s2->cases.otherwise || s2->cases.otherwise == nonmatching) -- return false; -- Partition *p1 = s1->cases.otherwise->partition; -- Partition *p2 = 
s2->cases.otherwise->partition; -- if (p1 != p2) -- return false; -- } else if (s2->cases.otherwise && s2->cases.otherwise != nonmatching) { -- return false; -- } -- -- if (s1->cases.cases.size() != s2->cases.cases.size()) -- return false; -- for (Cases::iterator j1 = s1->cases.begin(); j1 != s1->cases.end(); -- j1++){ -- Cases::iterator j2 = s2->cases.cases.find(j1->first); -- if (j2 == s2->cases.end()) -- return false; -- Partition *p1 = j1->second->partition; -- Partition *p2 = j2->second->partition; -- if (p1 != p2) -- return false; -- } -- -- return true; --} -- --/* Do simple djb2 hashing against a States transition cases -- * this provides a rough initial guess at state equivalence as if a state -- * has a different number of transitions or has transitions on different -- * cases they will never be equivalent. -- * Note: this only hashes based off of the alphabet (not destination) -- * as different destinations could end up being equiv -- */ --size_t DFA::hash_trans(State *s) --{ -- unsigned long hash = 5381; -- -- for (Cases::iterator j = s->cases.begin(); j != s->cases.end(); j++){ -- hash = ((hash << 5) + hash) + j->first; -- State *k = j->second; -- hash = ((hash << 5) + hash) + k->cases.cases.size(); -- } -- -- if (s->cases.otherwise && s->cases.otherwise != nonmatching) { -- hash = ((hash << 5) + hash) + 5381; -- State *k = s->cases.otherwise; -- hash = ((hash << 5) + hash) + k->cases.cases.size(); -- } -- -- hash = (hash << 8) | s->cases.cases.size(); -- return hash; --} -- --/* minimize the number of dfa states */ --void DFA::minimize(dfaflags_t flags) --{ -- map , Partition *> perm_map; -- list partitions; -- -- /* Set up the initial partitions -- * minimium of - 1 non accepting, and 1 accepting -- * if trans hashing is used the accepting and non-accepting partitions -- * can be further split based on the number and type of transitions -- * a state makes. -- * If permission hashing is enabled the accepting partitions can -- * be further divided by permissions. This can result in not -- * obtaining a truely minimized dfa but comes close, and can speedup -- * minimization. -- */ -- int accept_count = 0; -- int final_accept = 0; -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- uint64_t perm_hash = 0; -- if (flags & DFA_CONTROL_MINIMIZE_HASH_PERMS) { -- /* make every unique perm create a new partition */ -- perm_hash = ((uint64_t)(*i)->audit)<<32 | -- (uint64_t)(*i)->accept; -- } else if ((*i)->audit || (*i)->accept) { -- /* combine all perms together into a single parition */ -- perm_hash = 1; -- } /* else not an accept state so 0 for perm_hash */ -- -- size_t trans_hash = 0; -- if (flags & DFA_CONTROL_MINIMIZE_HASH_TRANS) -- trans_hash = hash_trans(*i); -- pair group = make_pair(perm_hash, trans_hash); -- map , Partition *>::iterator p = perm_map.find(group); -- if (p == perm_map.end()) { -- Partition *part = new Partition(); -- part->push_back(*i); -- perm_map.insert(make_pair(group, part)); -- partitions.push_back(part); -- (*i)->partition = part; -- if (perm_hash) -- accept_count++; -- } else { -- (*i)->partition = p->second; -- p->second->push_back(*i); -- } -- -- if ((flags & DFA_DUMP_PROGRESS) && -- (partitions.size() % 1000 == 0)) -- cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << partitions.size() << " (accept " << accept_count << ")\r"; -- } -- -- /* perm_map is no longer needed so free the memory it is using. -- * Don't remove - doing it manually here helps reduce peak memory usage. 
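The refinement loop that follows splits a partition whenever two of its members disagree about which partition they transition into, and stops once no split happens. A compact toy illustration of that fixed point, assuming a small explicit transition matrix rather than the patch's State/Partition types (it relabels states by signature each round instead of splitting partitions in place):

#include <map>
#include <vector>
#include <utility>
#include <iostream>

int main()
{
        const int nstates = 4, nsyms = 2;
        /* delta[state][symbol]; states 2 and 3 are equivalent accepting sinks */
        int delta[4][2]   = { {1, 2}, {1, 3}, {2, 2}, {3, 3} };
        bool accepting[4] = { false, false, true, true };

        std::vector<int> group(nstates);
        for (int s = 0; s < nstates; s++)
                group[s] = accepting[s] ? 1 : 0;        /* initial split: accept / non-accept */

        for (;;) {
                /* signature = (own group, group reached on each symbol) */
                std::map<std::vector<int>, int> sig2group;
                std::vector<int> next(nstates);
                for (int s = 0; s < nstates; s++) {
                        std::vector<int> sig(1, group[s]);
                        for (int c = 0; c < nsyms; c++)
                                sig.push_back(group[delta[s][c]]);
                        std::map<std::vector<int>, int>::iterator p = sig2group.find(sig);
                        if (p == sig2group.end())
                                p = sig2group.insert(std::make_pair(sig, (int)sig2group.size())).first;
                        next[s] = p->second;
                }
                if (next == group)
                        break;                          /* nothing was split: stable */
                group = next;
        }

        for (int s = 0; s < nstates; s++)
                std::cout << "state " << s << " -> class " << group[s] << "\n";
        /* states 0 and 1 end up in one class, the two accepting sinks in another */
        return 0;
}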
-- */ -- perm_map.clear(); -- -- int init_count = partitions.size(); -- if (flags & DFA_DUMP_PROGRESS) -- cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\r"; -- -- /* Now do repartitioning until each partition contains the set of -- * states that are the same. This will happen when the partition -- * splitting stables. With a worse case of 1 state per partition -- * ie. already minimized. -- */ -- Partition *new_part; -- int new_part_count; -- do { -- new_part_count = 0; -- for (list ::iterator p = partitions.begin(); -- p != partitions.end(); p++) { -- new_part = NULL; -- State *rep = *((*p)->begin()); -- Partition::iterator next; -- for (Partition::iterator s = ++(*p)->begin(); -- s != (*p)->end(); ) { -- if (same_mappings(rep, *s)) { -- ++s; -- continue; -- } -- if (!new_part) { -- new_part = new Partition; -- list ::iterator tmp = p; -- partitions.insert(++tmp, new_part); -- new_part_count++; -- } -- new_part->push_back(*s); -- s = (*p)->erase(s); -- } -- /* remapping partition_map for new_part entries -- * Do not do this above as it messes up same_mappings -- */ -- if (new_part) { -- for (Partition::iterator m = new_part->begin(); -- m != new_part->end(); m++) { -- (*m)->partition = new_part; -- } -- } -- if ((flags & DFA_DUMP_PROGRESS) && -- (partitions.size() % 100 == 0)) -- cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\r"; -- } -- } while(new_part_count); -- -- if (partitions.size() == states.size()) { -- if (flags & DFA_DUMP_STATS) -- cerr << "\033[2KDfa minimization no states removed: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\n"; -- -- -- goto out; -- } -- -- /* Remap the dfa so it uses the representative states -- * Use the first state of a partition as the representative state -- * At this point all states with in a partion have transitions -- * to states within the same partitions, however this can slow -- * down compressed dfa compression as there are more states, -- */ -- for (list ::iterator p = partitions.begin(); -- p != partitions.end(); p++) { -- /* representative state for this partition */ -- State *rep = *((*p)->begin()); -- -- /* update representative state's transitions */ -- if (rep->cases.otherwise) { -- Partition *partition = rep->cases.otherwise->partition; -- rep->cases.otherwise = *partition->begin(); -- } -- for (Cases::iterator c = rep->cases.begin(); -- c != rep->cases.end(); c++) { -- Partition *partition = c->second->partition; -- c->second = *partition->begin(); -- } -- --//if ((*p)->size() > 1) --//cerr << rep->label << ": "; -- /* clear the state label for all non representative states, -- * and accumulate permissions */ -- for (Partition::iterator i = ++(*p)->begin(); i != (*p)->end(); i++) { --//cerr << " " << (*i)->label; -- (*i)->label = -1; -- rep->accept |= (*i)->accept; -- rep->audit |= (*i)->audit; -- } -- if (rep->accept || rep->audit) -- final_accept++; --//if ((*p)->size() > 1) --//cerr << "\n"; -- } -- if (flags & DFA_DUMP_STATS) -- cerr << "\033[2KMinimized dfa: final partitions " << partitions.size() << " (accept " << final_accept << ")" << "\tinit " << init_count << " (accept " << accept_count << ")\n"; -- -- -- -- /* make sure nonmatching and start state are up to date with the -- * mappings */ -- { -- Partition *partition = nonmatching->partition; -- if (*partition->begin() != nonmatching) { -- nonmatching = 
*partition->begin(); -- } -- -- partition = start->partition; -- if (*partition->begin() != start) { -- start = *partition->begin(); -- } -- } -- -- /* Now that the states have been remapped, remove all states -- * that are not the representive states for their partition, they -- * will have a label == -1 -- */ -- for (Partition::iterator i = states.begin(); i != states.end(); ) { -- if ((*i)->label == -1) { -- State *s = *i; -- i = states.erase(i); -- delete(s); -- } else -- i++; -- } -- --out: -- /* Cleanup */ -- while (!partitions.empty()) { -- Partition *p = partitions.front(); -- partitions.pop_front(); -- delete(p); -- } --} -- --/** -- * text-dump the DFA (for debugging). -- */ --void DFA::dump(ostream& os) --{ -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- if (*i == start || (*i)->accept) { -- os << **i; -- if (*i == start) -- os << " <=="; -- if ((*i)->accept) { -- os << " (0x" << hex << (*i)->accept << " " << (*i)->audit << dec << ')'; -- } -- os << endl; -- } -- } -- os << endl; -- -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- if ((*i)->cases.otherwise) -- os << **i << " -> " << (*i)->cases.otherwise << endl; -- for (Cases::iterator j = (*i)->cases.begin(); j != (*i)->cases.end(); j++) { -- os << **i << " -> " << j->second << ": " << j->first << endl; -- } -- } -- os << endl; --} -- --/** -- * Create a dot (graphviz) graph from the DFA (for debugging). -- */ --void DFA::dump_dot_graph(ostream& os) --{ -- os << "digraph \"dfa\" {" << endl; -- -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- if (*i == nonmatching) -- continue; -- -- os << "\t\"" << **i << "\" [" << endl; -- if (*i == start) { -- os << "\t\tstyle=bold" << endl; -- } -- uint32_t perms = (*i)->accept; -- if (perms) { -- os << "\t\tlabel=\"" << **i << "\\n(" -- << perms << ")\"" << endl; -- } -- os << "\t]" << endl; -- } -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- Cases& cases = (*i)->cases; -- Chars excluded; -- -- for (Cases::iterator j = cases.begin(); j != cases.end(); j++) { -- if (j->second == nonmatching) -- excluded.insert(j->first); -- else { -- os << "\t\"" << **i << "\" -> \""; -- os << j->second << "\" [" << endl; -- os << "\t\tlabel=\"" << j->first << "\"" << endl; -- os << "\t]" << endl; -- } -- } -- if (cases.otherwise && cases.otherwise != nonmatching) { -- os << "\t\"" << **i << "\" -> \"" << cases.otherwise -- << "\" [" << endl; -- if (!excluded.empty()) { -- os << "\t\tlabel=\"[^"; -- for (Chars::iterator i = excluded.begin(); -- i != excluded.end(); -- i++) { -- os << *i; -- } -- os << "]\"" << endl; -- } -- os << "\t]" << endl; -- } -- } -- os << '}' << endl; --} -- --/** -- * Compute character equivalence classes in the DFA to save space in the -- * transition table. 
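The idea behind the equivalence classes computed below: two input bytes can share one column of the transition table if every state sends them to the same next state, so the table only needs one column per class instead of one per byte. The patch builds the classes incrementally while walking each state's edges; a toy sketch of the same property on a small explicit table:

#include <map>
#include <vector>
#include <utility>
#include <iostream>

int main()
{
        /* toy transition table: 3 states x 4 input bytes */
        int next[3][4] = { {1, 1, 2, 0},
                           {1, 1, 0, 0},
                           {2, 2, 2, 1} };

        std::map<std::vector<int>, int> column_class;   /* distinct column -> class number */
        std::vector<int> eq(4);
        for (int c = 0; c < 4; c++) {
                std::vector<int> column;
                for (int s = 0; s < 3; s++)
                        column.push_back(next[s][c]);
                std::map<std::vector<int>, int>::iterator p = column_class.find(column);
                if (p == column_class.end())
                        p = column_class.insert(std::make_pair(column, (int)column_class.size() + 1)).first;
                eq[c] = p->second;
        }
        for (int c = 0; c < 4; c++)
                std::cout << "byte " << c << " -> class " << eq[c] << "\n";
        /* bytes 0 and 1 have identical columns, so they share class 1 */
        return 0;
}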
-- */ --map DFA::equivalence_classes(dfaflags_t flags) --{ -- map classes; -- uchar next_class = 1; -- -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- Cases& cases = (*i)->cases; -- -- /* Group edges to the same next state together */ -- map node_sets; -- for (Cases::iterator j = cases.begin(); j != cases.end(); j++) -- node_sets[j->second].insert(j->first); -- -- for (map::iterator j = node_sets.begin(); -- j != node_sets.end(); -- j++) { -- /* Group edges to the same next state together by class */ -- map node_classes; -- bool class_used = false; -- for (Chars::iterator k = j->second.begin(); -- k != j->second.end(); -- k++) { -- pair::iterator, bool> x = -- classes.insert(make_pair(*k, next_class)); -- if (x.second) -- class_used = true; -- pair::iterator, bool> y = -- node_classes.insert(make_pair(x.first->second, Chars())); -- y.first->second.insert(*k); -- } -- if (class_used) { -- next_class++; -- class_used = false; -- } -- for (map::iterator k = node_classes.begin(); -- k != node_classes.end(); -- k++) { -- /** -- * If any other characters are in the same class, move -- * the characters in this class into their own new class -- */ -- map::iterator l; -- for (l = classes.begin(); l != classes.end(); l++) { -- if (l->second == k->first && -- k->second.find(l->first) == k->second.end()) { -- class_used = true; -- break; -- } -- } -- if (class_used) { -- for (Chars::iterator l = k->second.begin(); -- l != k->second.end(); -- l++) { -- classes[*l] = next_class; -- } -- next_class++; -- class_used = false; -- } -- } -- } -- } -- -- if (flags & DFA_DUMP_EQUIV_STATS) -- fprintf(stderr, "Equiv class reduces to %d classes\n", next_class - 1); -- return classes; --} -- --/** -- * Text-dump the equivalence classes (for debugging). -- */ --void dump_equivalence_classes(ostream& os, map& eq) --{ -- map rev; -- -- for (map::iterator i = eq.begin(); i != eq.end(); i++) { -- Chars& chars = rev.insert(make_pair(i->second, -- Chars())).first->second; -- chars.insert(i->first); -- } -- os << "(eq):" << endl; -- for (map::iterator i = rev.begin(); i != rev.end(); i++) { -- os << (int)i->first << ':'; -- Chars& chars = i->second; -- for (Chars::iterator j = chars.begin(); j != chars.end(); j++) { -- os << ' ' << *j; -- } -- os << endl; -- } --} -- --/** -- * Replace characters with classes (which are also represented as -- * characters) in the DFA transition table. -- */ --void DFA::apply_equivalence_classes(map& eq) --{ -- /** -- * Note: We only transform the transition table; the nodes continue to -- * contain the original characters. -- */ -- for (Partition::iterator i = states.begin(); i != states.end(); i++) { -- map tmp; -- tmp.swap((*i)->cases.cases); -- for (Cases::iterator j = tmp.begin(); j != tmp.end(); j++) -- (*i)->cases.cases.insert(make_pair(eq[j->first], j->second)); -- } --} -- --/** -- * Flip the children of all cat nodes. This causes strings to be matched -- * back-forth. 
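In other words, flipping every cat node makes the automaton match its input back to front: for /abc/, whose tree is cat(cat(a, b), c), swapping the children of each cat node yields cat(c, cat(b, a)), which matches the reversed string "cba". This is what the reverse flag stored by aare_new_ruleset() ultimately selects, via the flip_tree() call in aare_add_rule_vec() further down.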
-- */ --void flip_tree(Node *node) --{ -- for (depth_first_traversal i(node); i; i++) { -- if (CatNode *cat = dynamic_cast(*i)) { -- swap(cat->child[0], cat->child[1]); -- } -- } --} -- --class TransitionTable { -- typedef vector > DefaultBase; -- typedef vector > NextCheck; --public: -- TransitionTable(DFA& dfa, map& eq, dfaflags_t flags); -- void dump(ostream& os); -- void flex_table(ostream& os, const char *name); -- void init_free_list(vector > &free_list, size_t prev, size_t start); -- bool fits_in(vector > &free_list, -- size_t base, Cases& cases); -- void insert_state(vector > &free_list, -- State *state, DFA& dfa); -- --private: -- vector accept; -- vector accept2; -- DefaultBase default_base; -- NextCheck next_check; -- map num; -- map& eq; -- uchar max_eq; -- size_t first_free; --}; -- -- --void TransitionTable::init_free_list(vector > &free_list, -- size_t prev, size_t start) { -- for (size_t i = start; i < free_list.size(); i++) { -- if (prev) -- free_list[prev].second = i; -- free_list[i].first = prev; -- prev = i; -- } -- free_list[free_list.size() -1].second = 0; --} -- --/** -- * new Construct the transition table. -- */ --TransitionTable::TransitionTable(DFA& dfa, map& eq, -- dfaflags_t flags) -- : eq(eq) --{ -- -- if (flags & DFA_DUMP_TRANS_PROGRESS) -- fprintf(stderr, "Compressing trans table:\r"); -- -- -- if (eq.empty()) -- max_eq = 255; -- else { -- max_eq = 0; -- for(map::iterator i = eq.begin(); i != eq.end(); i++) { -- if (i->second > max_eq) -- max_eq = i->second; -- } -- } -- -- /* Do initial setup adding up all the transitions and sorting by -- * transition count. -- */ -- size_t optimal = 2; -- multimap order; -- vector > free_list; -- -- for (Partition::iterator i = dfa.states.begin(); i != dfa.states.end(); i++) { -- if (*i == dfa.start || *i == dfa.nonmatching) -- continue; -- optimal += (*i)->cases.cases.size(); -- if (flags & DFA_CONTROL_TRANS_HIGH) { -- size_t range = 0; -- if ((*i)->cases.cases.size()) -- range = (*i)->cases.cases.rbegin()->first - (*i)->cases.begin()->first; -- size_t ord = ((256 - (*i)->cases.cases.size()) << 8) | -- (256 - range); -- /* reverse sort by entry count, most entries first */ -- order.insert(make_pair(ord, *i)); -- } -- } -- -- /* Insert the dummy nonmatching transition by hand */ -- next_check.push_back(make_pair(dfa.nonmatching, dfa.nonmatching)); -- default_base.push_back(make_pair(dfa.nonmatching, 0)); -- num.insert(make_pair(dfa.nonmatching, num.size())); -- -- accept.resize(dfa.states.size()); -- accept2.resize(dfa.states.size()); -- next_check.resize(optimal); -- free_list.resize(optimal); -- -- accept[0] = 0; -- accept2[0] = 0; -- first_free = 1; -- init_free_list(free_list, 0, 1); -- -- insert_state(free_list, dfa.start, dfa); -- accept[1] = 0; -- accept2[1] = 0; -- num.insert(make_pair(dfa.start, num.size())); -- -- int count = 2; -- -- if (!(flags & DFA_CONTROL_TRANS_HIGH)) { -- for (Partition::iterator i = dfa.states.begin(); i != dfa.states.end(); -- i++) { -- if (*i != dfa.nonmatching && *i != dfa.start) { -- insert_state(free_list, *i, dfa); -- accept[num.size()] = (*i)->accept; -- accept2[num.size()] = (*i)->audit; -- num.insert(make_pair(*i, num.size())); -- } -- if (flags & (DFA_DUMP_TRANS_PROGRESS)) { -- count++; -- if (count % 100 == 0) -- fprintf(stderr, "\033[2KCompressing trans table: insert state: %d/%ld\r", count, dfa.states.size()); -- } -- } -- } else { -- for (multimap ::iterator i = order.begin(); -- i != order.end(); i++) { -- if (i->second != dfa.nonmatching && i->second != dfa.start) { -- 
insert_state(free_list, i->second, dfa); -- accept[num.size()] = i->second->accept; -- accept2[num.size()] = i->second->audit; -- num.insert(make_pair(i->second, num.size())); -- } -- if (flags & (DFA_DUMP_TRANS_PROGRESS)) { -- count++; -- if (count % 100 == 0) -- fprintf(stderr, "\033[2KCompressing trans table: insert state: %d/%ld\r", count, dfa.states.size()); -- } -- } -- } -- -- if (flags & (DFA_DUMP_TRANS_STATS | DFA_DUMP_TRANS_PROGRESS)) { -- ssize_t size = 4 * next_check.size() + 6 * dfa.states.size(); -- fprintf(stderr, "\033[2KCompressed trans table: states %ld, next/check %ld, optimal next/check %ld avg/state %.2f, compression %ld/%ld = %.2f %%\n", dfa.states.size(), next_check.size(), optimal, (float)next_check.size()/(float)dfa.states.size(), size, 512 * dfa.states.size(), 100.0 - ((float) size * 100.0 / (float)(512 * dfa.states.size()))); -- } --} -- -- --/** -- * Does fit into position of the transition table? -- */ --bool TransitionTable::fits_in(vector > &free_list __attribute__((unused)), -- size_t pos, Cases& cases) --{ -- size_t c, base = pos - cases.begin()->first; -- for (Cases::iterator i = cases.begin(); i != cases.end(); i++) { -- c = base + i->first; -- /* if it overflows the next_check array it fits in as we will -- * resize */ -- if (c >= next_check.size()) -- return true; -- if (next_check[c].second) -- return false; -- } -- -- return true; --} -- --/** -- * Insert of into the transition table. -- */ --void TransitionTable::insert_state(vector > &free_list, -- State *from, DFA& dfa) --{ -- State *default_state = dfa.nonmatching; -- size_t base = 0; -- int resize; -- -- Cases& cases = from->cases; -- size_t c = cases.begin()->first; -- size_t prev = 0; -- size_t x = first_free; -- -- if (cases.otherwise) -- default_state = cases.otherwise; -- if (cases.cases.empty()) -- goto do_insert; -- --repeat: -- resize = 0; -- /* get the first free entry that won't underflow */ -- while (x && (x < c)) { -- prev = x; -- x = free_list[x].second; -- } -- -- /* try inserting until we succeed. */ -- while (x && !fits_in(free_list, x, cases)) { -- prev = x; -- x = free_list[x].second; -- } -- if (!x) { -- resize = 256 - cases.begin()->first; -- x = free_list.size(); -- /* set prev to last free */ -- } else if (x + 255 - cases.begin()->first >= next_check.size()) { -- resize = (255 - cases.begin()->first - (next_check.size() - 1 - x)); -- for (size_t y = x; y; y = free_list[y].second) -- prev = y; -- } -- if (resize) { -- /* expand next_check and free_list */ -- size_t old_size = free_list.size(); -- next_check.resize(next_check.size() + resize); -- free_list.resize(free_list.size() + resize); -- init_free_list(free_list, prev, old_size); -- if (!first_free) -- first_free = old_size;; -- if (x == old_size) -- goto repeat; -- } -- -- base = x - c; -- for (Cases::iterator j = cases.begin(); j != cases.end(); j++) { -- next_check[base + j->first] = make_pair(j->second, from); -- size_t prev = free_list[base + j->first].first; -- size_t next = free_list[base + j->first].second; -- if (prev) -- free_list[prev].second = next; -- if (next) -- free_list[next].first = prev; -- if (base + j->first == first_free) -- first_free = next; -- } -- --do_insert: -- default_base.push_back(make_pair(default_state, base)); --} -- --/** -- * Text-dump the transition table (for debugging). 
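The default_base and next_check vectors built above form the usual comb-compressed layout (the same scheme flex emits): each state owns a base offset and a default transition, and a slot in the shared next/check arrays is honoured only if its check entry names the owning state. A rough match-time sketch, assuming toy types and ignoring equivalence classes and the accept arrays:

#include <vector>
#include <cstddef>

struct CombTable {
        std::vector<int> def, base;     /* one entry per state */
        std::vector<int> next, check;   /* shared, interleaved slots */

        int next_state(int state, unsigned char c) const
        {
                std::size_t pos = (std::size_t)base[state] + c;
                if (pos < check.size() && check[pos] == state)
                        return next[pos];       /* explicit transition for this byte */
                return def[state];              /* otherwise take the default transition */
        }
};

int main()
{
        CombTable t;
        /* two states, both with base 0 and default back to state 0,
         * plus one explicit transition: state 0 on byte 1 goes to state 1 */
        t.def.push_back(0);  t.base.push_back(0);
        t.def.push_back(0);  t.base.push_back(0);
        t.next.resize(2, 0); t.check.resize(2, -1);
        t.next[1] = 1;       t.check[1] = 0;
        return (t.next_state(0, 1) == 1 && t.next_state(0, 5) == 0) ? 0 : 1;
}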
-- */ --void TransitionTable::dump(ostream& os) --{ -- map st; -- for (map::iterator i = num.begin(); -- i != num.end(); -- i++) { -- st.insert(make_pair(i->second, i->first)); -- } -- -- os << "size=" << default_base.size() << " (accept, default, base): {state} -> {default state}" << endl; -- for (size_t i = 0; i < default_base.size(); i++) { -- os << i << ": "; -- os << "(" << accept[i] << ", " -- << num[default_base[i].first] << ", " -- << default_base[i].second << ")"; -- if (st[i]) -- os << " " << *st[i]; -- if (default_base[i].first) -- os << " -> " << *default_base[i].first; -- os << endl; -- } -- -- os << "size=" << next_check.size() << " (next, check): {check state} -> {next state} : offset from base" << endl; -- for (size_t i = 0; i < next_check.size(); i++) { -- if (!next_check[i].second) -- continue; -- -- os << i << ": "; -- if (next_check[i].second) { -- os << "(" << num[next_check[i].first] << ", " -- << num[next_check[i].second] << ")" << " " -- << *next_check[i].second << " -> " -- << *next_check[i].first << ": "; -- -- size_t offs = i - default_base[num[next_check[i].second]].second; -- if (eq.size()) -- os << offs; -- else -- os << (uchar)offs; -- } -- os << endl; -- } --} -- --#if 0 --template --class FirstIterator { --public: -- FirstIterator(Iter pos) : pos(pos) { } -- typename Iter::value_type::first_type operator*() { return pos->first; } -- bool operator!=(FirstIterator& i) { return pos != i.pos; } -- void operator++() { ++pos; } -- ssize_t operator-(FirstIterator i) { return pos - i.pos; } --private: -- Iter pos; --}; -- --template --FirstIterator first_iterator(Iter iter) --{ -- return FirstIterator(iter); --} -- --template --class SecondIterator { --public: -- SecondIterator(Iter pos) : pos(pos) { } -- typename Iter::value_type::second_type operator*() { return pos->second; } -- bool operator!=(SecondIterator& i) { return pos != i.pos; } -- void operator++() { ++pos; } -- ssize_t operator-(SecondIterator i) { return pos - i.pos; } --private: -- Iter pos; --}; -- --template --SecondIterator second_iterator(Iter iter) --{ -- return SecondIterator(iter); --} --#endif -- --/** -- * Create a flex-style binary dump of the DFA tables. The table format -- * was partly reverse engineered from the flex sources and from -- * examining the tables that flex creates with its --tables-file option. -- * (Only the -Cf and -Ce formats are currently supported.) -- */ -- --#include "flex-tables.h" --#include "regexp.h" -- --static inline size_t pad64(size_t i) --{ -- return (i + (size_t)7) & ~(size_t)7; --} -- --string fill64(size_t i) --{ -- const char zeroes[8] = { }; -- string fill(zeroes, (i & 7) ? 
8 - (i & 7) : 0); -- return fill; --} -- --template --size_t flex_table_size(Iter pos, Iter end) --{ -- return pad64(sizeof(struct table_header) + sizeof(*pos) * (end - pos)); --} -- --template --void write_flex_table(ostream& os, int id, Iter pos, Iter end) --{ -- struct table_header td = { 0, 0, 0, 0 }; -- size_t size = end - pos; -- -- td.td_id = htons(id); -- td.td_flags = htons(sizeof(*pos)); -- td.td_lolen = htonl(size); -- os.write((char *)&td, sizeof(td)); -- -- for (; pos != end; ++pos) { -- switch(sizeof(*pos)) { -- case 4: -- os.put((char)(*pos >> 24)); -- os.put((char)(*pos >> 16)); -- case 2: -- os.put((char)(*pos >> 8)); -- case 1: -- os.put((char)*pos); -- } -- } -- -- os << fill64(sizeof(td) + sizeof(*pos) * size); --} -- --void TransitionTable::flex_table(ostream& os, const char *name) --{ -- const char th_version[] = "notflex"; -- struct table_set_header th = { 0, 0, 0, 0 }; -- -- /** -- * Change the following two data types to adjust the maximum flex -- * table size. -- */ -- typedef uint16_t state_t; -- typedef uint32_t trans_t; -- -- if (default_base.size() >= (state_t)-1) { -- cerr << "Too many states (" << default_base.size() << ") for " -- "type state_t" << endl; -- exit(1); -- } -- if (next_check.size() >= (trans_t)-1) { -- cerr << "Too many transitions (" << next_check.size() << ") for " -- "type trans_t" << endl; -- exit(1); -- } -- -- /** -- * Create copies of the data structures so that we can dump the tables -- * using the generic write_flex_table() routine. -- */ -- vector equiv_vec; -- if (eq.size()) { -- equiv_vec.resize(256); -- for (map::iterator i = eq.begin(); i != eq.end(); i++) { -- equiv_vec[i->first] = i->second; -- } -- } -- -- vector default_vec; -- vector base_vec; -- for (DefaultBase::iterator i = default_base.begin(); -- i != default_base.end(); -- i++) { -- default_vec.push_back(num[i->first]); -- base_vec.push_back(i->second); -- } -- -- vector next_vec; -- vector check_vec; -- for (NextCheck::iterator i = next_check.begin(); -- i != next_check.end(); -- i++) { -- next_vec.push_back(num[i->first]); -- check_vec.push_back(num[i->second]); -- } -- -- /* Write the actual flex parser table. */ -- -- size_t hsize = pad64(sizeof(th) + sizeof(th_version) + strlen(name) + 1); -- th.th_magic = htonl(YYTH_REGEXP_MAGIC); -- th.th_hsize = htonl(hsize); -- th.th_ssize = htonl(hsize + -- flex_table_size(accept.begin(), accept.end()) + -- flex_table_size(accept2.begin(), accept2.end()) + -- (eq.size() ? 
-- flex_table_size(equiv_vec.begin(), equiv_vec.end()) : 0) + -- flex_table_size(base_vec.begin(), base_vec.end()) + -- flex_table_size(default_vec.begin(), default_vec.end()) + -- flex_table_size(next_vec.begin(), next_vec.end()) + -- flex_table_size(check_vec.begin(), check_vec.end())); -- os.write((char *)&th, sizeof(th)); -- os << th_version << (char)0 << name << (char)0; -- os << fill64(sizeof(th) + sizeof(th_version) + strlen(name) + 1); -- -- -- write_flex_table(os, YYTD_ID_ACCEPT, accept.begin(), accept.end()); -- write_flex_table(os, YYTD_ID_ACCEPT2, accept2.begin(), accept2.end()); -- if (eq.size()) -- write_flex_table(os, YYTD_ID_EC, equiv_vec.begin(), equiv_vec.end()); -- write_flex_table(os, YYTD_ID_BASE, base_vec.begin(), base_vec.end()); -- write_flex_table(os, YYTD_ID_DEF, default_vec.begin(), default_vec.end()); -- write_flex_table(os, YYTD_ID_NXT, next_vec.begin(), next_vec.end()); -- write_flex_table(os, YYTD_ID_CHK, check_vec.begin(), check_vec.end()); --} -- --#if 0 --typedef set AcceptNodes; --map dominance(DFA& dfa) --{ -- map is_dominated; -- -- for (States::iterator i = dfa.states.begin(); i != dfa.states.end(); i++) { -- AcceptNodes set1; -- for (State::iterator j = (*i)->begin(); j != (*i)->end(); j++) { -- if (AcceptNode *accept = dynamic_cast(*j)) -- set1.insert(accept); -- } -- for (AcceptNodes::iterator j = set1.begin(); j != set1.end(); j++) { -- pair::iterator, bool> x = -- is_dominated.insert(make_pair(*j, set1)); -- if (!x.second) { -- AcceptNodes &set2(x.first->second), set3; -- for (AcceptNodes::iterator l = set2.begin(); -- l != set2.end(); -- l++) { -- if (set1.find(*l) != set1.end()) -- set3.insert(*l); -- } -- set3.swap(set2); -- } -- } -- } -- return is_dominated; --} --#endif -- --void dump_regexp_rec(ostream& os, Node *tree) --{ -- if (tree->child[0]) -- dump_regexp_rec(os, tree->child[0]); -- os << *tree; -- if (tree->child[1]) -- dump_regexp_rec(os, tree->child[1]); --} -- --void dump_regexp(ostream& os, Node *tree) --{ -- dump_regexp_rec(os, tree); -- os << endl; --} -- --#include --#include -- --struct aare_ruleset { -- int reverse; -- Node *root; --}; -- --extern "C" aare_ruleset_t *aare_new_ruleset(int reverse) --{ -- aare_ruleset_t *container = (aare_ruleset_t *) malloc(sizeof(aare_ruleset_t)); -- if (!container) -- return NULL; -- -- container->root = NULL; -- container->reverse = reverse; -- -- return container; --} -- --extern "C" void aare_delete_ruleset(aare_ruleset_t *rules) --{ -- if (rules) { -- if (rules->root) -- rules->root->release(); -- free(rules); -- } --} -- --static inline int diff_qualifiers(uint32_t perm1, uint32_t perm2) --{ -- return ((perm1 & AA_EXEC_TYPE) && (perm2 & AA_EXEC_TYPE) && -- (perm1 & AA_EXEC_TYPE) != (perm2 & AA_EXEC_TYPE)); --} -- --/** -- * Compute the permission flags that this state corresponds to. If we -- * have any exact matches, then they override the execute and safe -- * execute flags. 
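accept_perms() below ORs together the permission bits of every accept node reached by a state, lets exact-match rules win for the execute-type bits, and finally strips anything denied. A toy illustration of just the "exact match overrides the x-type bits" step, with a made-up mask standing in for the real AA_*_EXEC_TYPE constants:

#include <stdint.h>
#include <iostream>

int main()
{
        const uint32_t EXEC_TYPE = 0x00f0;              /* assumed stand-in for the x-modifier bits */

        uint32_t wildcard_perms = 0x0003 | 0x0020;      /* rw bits plus an x modifier from a globbed rule */
        uint32_t exact_perms    = 0x0040;               /* x modifier from an exact-match rule */

        /* non-exec bits simply accumulate ... */
        uint32_t perms = wildcard_perms | (exact_perms & ~EXEC_TYPE);
        /* ... but any x-type bits from an exact match replace the wildcard ones */
        if (exact_perms & EXEC_TYPE)
                perms = (exact_perms & EXEC_TYPE) | (perms & ~EXEC_TYPE);

        std::cout << std::hex << perms << "\n";         /* 0x43: the exact rule's 0x40 replaces the wildcard's 0x20 */
        return 0;
}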
-- */ --uint32_t accept_perms(NodeSet *state, uint32_t *audit_ctl, int *error) --{ -- uint32_t perms = 0, exact_match_perms = 0, audit = 0, exact_audit = 0, -- quiet = 0, deny = 0; -- -- if (error) -- *error = 0; -- for (NodeSet::iterator i = state->begin(); i != state->end(); i++) { -- MatchFlag *match; -- if (!(match= dynamic_cast(*i))) -- continue; -- if (dynamic_cast(match)) { -- /* exact match only ever happens with x */ -- if (!is_merged_x_consistent(exact_match_perms, -- match->flag) && error) -- *error = 1;; -- exact_match_perms |= match->flag; -- exact_audit |= match->audit; -- } else if (dynamic_cast(match)) { -- deny |= match->flag; -- quiet |= match->audit; -- } else { -- if (!is_merged_x_consistent(perms, match->flag) && error) -- *error = 1; -- perms |= match->flag; -- audit |= match->audit; -- } -- } -- --//if (audit || quiet) --//fprintf(stderr, "perms: 0x%x, audit: 0x%x exact: 0x%x eaud: 0x%x deny: 0x%x quiet: 0x%x\n", perms, audit, exact_match_perms, exact_audit, deny, quiet); -- -- perms |= exact_match_perms & -- ~(AA_USER_EXEC_TYPE | AA_OTHER_EXEC_TYPE); -- -- if (exact_match_perms & AA_USER_EXEC_TYPE) { -- perms = (exact_match_perms & AA_USER_EXEC_TYPE) | -- (perms & ~AA_USER_EXEC_TYPE); -- audit = (exact_audit & AA_USER_EXEC_TYPE) | -- (audit & ~ AA_USER_EXEC_TYPE); -- } -- if (exact_match_perms & AA_OTHER_EXEC_TYPE) { -- perms = (exact_match_perms & AA_OTHER_EXEC_TYPE) | -- (perms & ~AA_OTHER_EXEC_TYPE); -- audit = (exact_audit & AA_OTHER_EXEC_TYPE) | -- (audit & ~AA_OTHER_EXEC_TYPE); -- } -- if (perms & AA_USER_EXEC & deny) -- perms &= ~AA_USER_EXEC_TYPE; -- -- if (perms & AA_OTHER_EXEC & deny) -- perms &= ~AA_OTHER_EXEC_TYPE; -- -- perms &= ~deny; -- -- if (audit_ctl) -- *audit_ctl = PACK_AUDIT_CTL(audit, quiet & deny); -- --// if (perms & AA_ERROR_BIT) { --// fprintf(stderr, "error bit 0x%x\n", perms); --// exit(255); --//} -- -- //if (perms & AA_EXEC_BITS) -- //fprintf(stderr, "accept perm: 0x%x\n", perms); -- /* -- if (perms & ~AA_VALID_PERMS) -- yyerror(_("Internal error accumulated invalid perm 0x%llx\n"), perms); -- */ -- --//if (perms & AA_CHANGE_HAT) --// fprintf(stderr, "change_hat 0x%x\n", perms); -- -- if (*error) -- fprintf(stderr, "profile has merged rule with conflicting x modifiers\n"); -- -- return perms; --} -- --extern "C" int aare_add_rule(aare_ruleset_t *rules, char *rule, int deny, -- uint32_t perms, uint32_t audit, dfaflags_t flags) --{ -- return aare_add_rule_vec(rules, deny, perms, audit, 1, &rule, flags); --} -- --#define FLAGS_WIDTH 2 --#define MATCH_FLAGS_SIZE (sizeof(uint32_t) * 8 - 1) --MatchFlag *match_flags[FLAGS_WIDTH][MATCH_FLAGS_SIZE]; --DenyMatchFlag *deny_flags[FLAGS_WIDTH][MATCH_FLAGS_SIZE]; --#define EXEC_MATCH_FLAGS_SIZE (AA_EXEC_COUNT *2 * 2 * 2) /* double for each of ix pux, unsafe x bits * u::o */ --MatchFlag *exec_match_flags[FLAGS_WIDTH][EXEC_MATCH_FLAGS_SIZE]; /* mods + unsafe + ix + pux * u::o*/ --ExactMatchFlag *exact_match_flags[FLAGS_WIDTH][EXEC_MATCH_FLAGS_SIZE];/* mods + unsafe + ix + pux *u::o*/ -- --extern "C" void aare_reset_matchflags(void) --{ -- uint32_t i, j; --#define RESET_FLAGS(group, size) { \ -- for (i = 0; i < FLAGS_WIDTH; i++) { \ -- for (j = 0; j < size; j++) { \ -- if ((group)[i][j]) delete (group)[i][j]; \ -- (group)[i][j] = NULL; \ -- } \ -- } \ --} -- RESET_FLAGS(match_flags,MATCH_FLAGS_SIZE); -- RESET_FLAGS(deny_flags,MATCH_FLAGS_SIZE); -- RESET_FLAGS(exec_match_flags,EXEC_MATCH_FLAGS_SIZE); -- RESET_FLAGS(exact_match_flags,EXEC_MATCH_FLAGS_SIZE); --#undef RESET_FLAGS --} -- --extern "C" int 
aare_add_rule_vec(aare_ruleset_t *rules, int deny, -- uint32_t perms, uint32_t audit, -- int count, char **rulev, -- dfaflags_t flags) --{ -- Node *tree = NULL, *accept; -- int exact_match; -- -- assert(perms != 0); -- -- if (regexp_parse(&tree, rulev[0])) -- return 0; -- for (int i = 1; i < count; i++) { -- Node *subtree = NULL; -- Node *node = new CharNode(0); -- if (!node) -- return 0; -- tree = new CatNode(tree, node); -- if (regexp_parse(&subtree, rulev[i])) -- return 0; -- tree = new CatNode(tree, subtree); -- } -- -- /* -- * Check if we have an expression with or without wildcards. This -- * determines how exec modifiers are merged in accept_perms() based -- * on how we split permission bitmasks here. -- */ -- exact_match = 1; -- for (depth_first_traversal i(tree); i; i++) { -- if (dynamic_cast(*i) || -- dynamic_cast(*i) || -- dynamic_cast(*i) || -- dynamic_cast(*i) || -- dynamic_cast(*i)) -- exact_match = 0; -- } -- -- if (rules->reverse) -- flip_tree(tree); -- -- --/* 0x7f == 4 bits x mods + 1 bit unsafe mask + 1 bit ix, + 1 pux after shift */ --#define EXTRACT_X_INDEX(perm, shift) (((perm) >> (shift + 7)) & 0x7f) -- --//if (perms & ALL_AA_EXEC_TYPE && (!perms & AA_EXEC_BITS)) --// fprintf(stderr, "adding X rule without MAY_EXEC: 0x%x %s\n", perms, rulev[0]); -- --//if (perms & ALL_EXEC_TYPE) --// fprintf(stderr, "adding X rule %s 0x%x\n", rulev[0], perms); -- --//if (audit) --//fprintf(stderr, "adding rule with audit bits set: 0x%x %s\n", audit, rulev[0]); -- --//if (perms & AA_CHANGE_HAT) --// fprintf(stderr, "adding change_hat rule %s\n", rulev[0]); -- --/* the permissions set is assumed to be non-empty if any audit -- * bits are specified */ -- accept = NULL; -- for (unsigned int n = 0; perms && n < (sizeof(perms) * 8) ; n++) { -- uint32_t mask = 1 << n; -- -- if (perms & mask) { -- int ai = audit & mask ? 
1 : 0; -- perms &= ~mask; -- -- Node *flag; -- if (mask & ALL_AA_EXEC_TYPE) -- /* these cases are covered by EXEC_BITS */ -- continue; -- if (deny) { -- if (deny_flags[ai][n]) { -- flag = deny_flags[ai][n]; -- } else { --//fprintf(stderr, "Adding deny ai %d mask 0x%x audit 0x%x\n", ai, mask, audit & mask); -- deny_flags[ai][n] = new DenyMatchFlag(mask, audit&mask); -- flag = deny_flags[ai][n]; -- } -- } else if (mask & AA_EXEC_BITS) { -- uint32_t eperm = 0; -- uint32_t index = 0; -- if (mask & AA_USER_EXEC) { -- eperm = mask | (perms & AA_USER_EXEC_TYPE); -- index = EXTRACT_X_INDEX(eperm, AA_USER_SHIFT); -- } else { -- eperm = mask | (perms & AA_OTHER_EXEC_TYPE); -- index = EXTRACT_X_INDEX(eperm, AA_OTHER_SHIFT) + (AA_EXEC_COUNT << 2); -- } --//fprintf(stderr, "index %d eperm 0x%x\n", index, eperm); -- if (exact_match) { -- if (exact_match_flags[ai][index]) { -- flag = exact_match_flags[ai][index]; -- } else { -- exact_match_flags[ai][index] = new ExactMatchFlag(eperm, audit&mask); -- flag = exact_match_flags[ai][index]; -- } -- } else { -- if (exec_match_flags[ai][index]) { -- flag = exec_match_flags[ai][index]; -- } else { -- exec_match_flags[ai][index] = new MatchFlag(eperm, audit&mask); -- flag = exec_match_flags[ai][index]; -- } -- } -- } else { -- if (match_flags[ai][n]) { -- flag = match_flags[ai][n]; -- } else { -- match_flags[ai][n] = new MatchFlag(mask, audit&mask); -- flag = match_flags[ai][n]; -- } -- } -- if (accept) -- accept = new AltNode(accept, flag); -- else -- accept = flag; -- } -- } -- -- if (flags & DFA_DUMP_RULE_EXPR) { -- cerr << "rule: "; -- cerr << rulev[0]; -- for (int i = 1; i < count; i++) { -- cerr << "\\x00"; -- cerr << rulev[i]; -- } -- cerr << " -> "; -- tree->dump(cerr); -- cerr << "\n\n"; -- } -- -- if (rules->root) -- rules->root = new AltNode(rules->root, new CatNode(tree, accept)); -- else -- rules->root = new CatNode(tree, accept); -- -- return 1; -- --} -- --/* create a dfa from the ruleset -- * returns: buffer contain dfa tables, @size set to the size of the tables -- * else NULL on failure -- */ --extern "C" void *aare_create_dfa(aare_ruleset_t *rules, size_t *size, dfaflags_t flags) --{ -- char *buffer = NULL; -- -- label_nodes(rules->root); -- if (flags & DFA_DUMP_TREE) { -- cerr << "\nDFA: Expression Tree\n"; -- rules->root->dump(cerr); -- cerr << "\n\n"; -- } -- -- if (flags & DFA_CONTROL_TREE_SIMPLE) { -- rules->root = simplify_tree(rules->root, flags); -- -- if (flags & DFA_DUMP_SIMPLE_TREE) { -- cerr << "\nDFA: Simplified Expression Tree\n"; -- rules->root->dump(cerr); -- cerr << "\n\n"; -- } -- } -- -- stringstream stream; -- try { -- DFA dfa(rules->root, flags); -- if (flags & DFA_DUMP_UNIQ_PERMS) -- dfa.dump_uniq_perms("dfa"); -- -- if (flags & DFA_CONTROL_MINIMIZE) { -- dfa.minimize(flags); -- -- if (flags & DFA_DUMP_MIN_UNIQ_PERMS) -- dfa.dump_uniq_perms("minimized dfa"); -- } -- if (flags & DFA_CONTROL_REMOVE_UNREACHABLE) -- dfa.remove_unreachable(flags); -- -- if (flags & DFA_DUMP_STATES) -- dfa.dump(cerr); -- -- if (flags & DFA_DUMP_GRAPH) -- dfa.dump_dot_graph(cerr); -- -- map eq; -- if (flags & DFA_CONTROL_EQUIV) { -- eq = dfa.equivalence_classes(flags); -- dfa.apply_equivalence_classes(eq); -- -- if (flags & DFA_DUMP_EQUIV) { -- cerr << "\nDFA equivalence class\n"; -- dump_equivalence_classes(cerr, eq); -- } -- } else if (flags & DFA_DUMP_EQUIV) -- cerr << "\nDFA did not generate an equivalence class\n"; -- -- TransitionTable transition_table(dfa, eq, flags); -- if (flags & DFA_DUMP_TRANS_TABLE) -- transition_table.dump(cerr); -- 
transition_table.flex_table(stream, ""); -- } catch (int error) { -- *size = 0; -- return NULL; -- } -- -- stringbuf *buf = stream.rdbuf(); -- -- buf->pubseekpos(0); -- *size = buf->in_avail(); -- -- buffer = (char *)malloc(*size); -- if (!buffer) -- return NULL; -- buf->sgetn(buffer, *size); -- return buffer; --} ---- /dev/null -+++ b/parser/libapparmor_re/regexp.yy -@@ -0,0 +1,3082 @@ -+/* -+ * regexp.y -- Regular Expression Matcher Generator -+ * (C) 2006, 2007 Andreas Gruenbacher -+ * -+ * Implementation based on the Lexical Analysis chapter of: -+ * Alfred V. Aho, Ravi Sethi, Jeffrey D. Ullman: -+ * Compilers: Principles, Techniques, and Tools (The "Dragon Book"), -+ * Addison-Wesley, 1986. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * See http://www.gnu.org for more details. -+ */ -+ -+%{ -+ /* #define DEBUG_TREE */ -+ -+ #include -+ #include -+ #include -+ #include -+ #include -+ #include -+ #include -+ #include -+ -+ using namespace std; -+ -+ typedef unsigned char uchar; -+ typedef set Chars; -+ -+ ostream& operator<<(ostream& os, uchar c); -+ -+ /* Compute the union of two sets. */ -+ template -+ set operator+(const set& a, const set& b) -+ { -+ set c(a); -+ c.insert(b.begin(), b.end()); -+ return c; -+ } -+ -+ /** -+ * When creating DFAs from regex trees, a DFA state is constructed from -+ * a set of important nodes in the syntax tree. This includes AcceptNodes, -+ * which indicate that when a match ends in a particular state, the -+ * regular expressions that the AcceptNode belongs to match. -+ */ -+ class ImportantNode; -+ typedef set NodeSet; -+ -+ /** -+ * Out-edges from a state to another: we store the follow-set of Nodes -+ * for each input character that is not a default match in -+ * cases (i.e., following a CharNode or CharSetNode), and default -+ * matches in otherwise as well as in all matching explicit cases -+ * (i.e., following an AnyCharNode or NotCharSetNode). This avoids -+ * enumerating all the explicit tranitions for default matches. -+ */ -+ typedef struct NodeCases { -+ typedef map::iterator iterator; -+ iterator begin() { return cases.begin(); } -+ iterator end() { return cases.end(); } -+ -+ NodeCases() : otherwise(0) { } -+ map cases; -+ NodeSet *otherwise; -+ } NodeCases; -+ -+ -+ /* An abstract node in the syntax tree. */ -+ class Node { -+ public: -+ Node() : -+ nullable(false) { child[0] = child[1] = 0; } -+ Node(Node *left) : -+ nullable(false) { child[0] = left; child[1] = 0; } -+ Node(Node *left, Node *right) : -+ nullable(false) { child[0] = left; child[1] = right; } -+ virtual ~Node() -+ { -+ if (child[0]) -+ child[0]->release(); -+ if (child[1]) -+ child[1]->release(); -+ } -+ -+ /** -+ * See the "Dragon Book" for an explanation of nullable, firstpos, -+ * lastpos, and followpos. -+ */ -+ virtual void compute_nullable() { } -+ virtual void compute_firstpos() = 0; -+ virtual void compute_lastpos() = 0; -+ virtual void compute_followpos() { } -+ virtual int eq(Node *other) = 0; -+ virtual ostream& dump(ostream& os) = 0; -+ -+ bool nullable; -+ NodeSet firstpos, lastpos, followpos; -+ /* child 0 is left, child 1 is right */ -+ Node *child[2]; -+ -+ unsigned int label; /* unique number for debug etc */ -+ /** -+ * We indirectly release Nodes through a virtual function because -+ * accept and Eps Nodes are shared, and must be treated specially. 
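The grammar file opens with a small helper that computes the union of two sets, later used to merge firstpos/lastpos/followpos sets; the angle-bracketed template arguments have been dropped from this copy of the patch. A standalone version with the template parameters written out as the function body implies:

#include <set>
#include <iostream>

template <class T>
std::set<T> operator+(const std::set<T> &a, const std::set<T> &b)
{
        std::set<T> c(a);                       /* start from a ... */
        c.insert(b.begin(), b.end());           /* ... and add everything in b */
        return c;
}

int main()
{
        std::set<int> a, b;
        a.insert(1); a.insert(2);
        b.insert(2); b.insert(3);
        std::set<int> u = a + b;                /* union: {1, 2, 3} */
        std::cout << u.size() << std::endl;     /* prints 3 */
        return 0;
}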
-+ * We could use full reference counting here but the indirect release -+ * is sufficient and has less overhead -+ */ -+ virtual void release(void) { -+ delete this; -+ } -+ }; -+ -+ class InnerNode : public Node { -+ public: -+ InnerNode() : Node() { }; -+ InnerNode(Node *left) : Node(left) {}; -+ InnerNode(Node *left, Node *right) : Node(left, right) { }; -+ }; -+ -+ class OneChildNode : public InnerNode { -+ public: -+ OneChildNode(Node *left) : InnerNode(left) { }; -+ }; -+ -+ class TwoChildNode : public InnerNode { -+ public: -+ TwoChildNode(Node *left, Node *right) : InnerNode(left, right) { }; -+ }; -+ -+ class LeafNode : public Node { -+ public: -+ LeafNode() : Node() { }; -+ -+ }; -+ -+ /* Match nothing (//). */ -+ class EpsNode : public LeafNode { -+ public: -+ EpsNode() : LeafNode() -+ { -+ nullable = true; -+ label = 0; -+ } -+ void release(void) -+ { -+ /* don't delete Eps nodes because there is a single static instance -+ * shared by all trees. Look for epsnode in the code -+ */ -+ } -+ -+ void compute_firstpos() -+ { -+ } -+ void compute_lastpos() -+ { -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) -+ return 1; -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ return os << "[]"; -+ } -+ }; -+ -+ /** -+ * Leaf nodes in the syntax tree are important to us: they describe the -+ * characters that the regular expression matches. We also consider -+ * AcceptNodes import: they indicate when a regular expression matches. -+ */ -+ class ImportantNode : public LeafNode { -+ public: -+ ImportantNode() : LeafNode() { } -+ void compute_firstpos() -+ { -+ firstpos.insert(this); -+ } -+ void compute_lastpos() { -+ lastpos.insert(this); -+ } -+ virtual void follow(NodeCases& cases) = 0; -+ }; -+ -+ /* common base class for all the different classes that contain -+ * character information. -+ */ -+ class CNode : public ImportantNode { -+ public: -+ CNode() : ImportantNode() { } -+ -+ }; -+ -+ /* Match one specific character (/c/). */ -+ class CharNode : public CNode { -+ public: -+ CharNode(uchar c) : c(c) { } -+ void follow(NodeCases& cases) -+ { -+ NodeSet **x = &cases.cases[c]; -+ if (!*x) { -+ if (cases.otherwise) -+ *x = new NodeSet(*cases.otherwise); -+ else -+ *x = new NodeSet; -+ } -+ (*x)->insert(followpos.begin(), followpos.end()); -+ } -+ int eq(Node *other) { -+ CharNode *o = dynamic_cast(other); -+ if (o) { -+ return c == o->c; -+ } -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ return os << c; -+ } -+ -+ uchar c; -+ }; -+ -+ /* Match a set of characters (/[abc]/). */ -+ class CharSetNode : public CNode { -+ public: -+ CharSetNode(Chars& chars) : chars(chars) { } -+ void follow(NodeCases& cases) -+ { -+ for (Chars::iterator i = chars.begin(); i != chars.end(); i++) { -+ NodeSet **x = &cases.cases[*i]; -+ if (!*x) { -+ if (cases.otherwise) -+ *x = new NodeSet(*cases.otherwise); -+ else -+ *x = new NodeSet; -+ } -+ (*x)->insert(followpos.begin(), followpos.end()); -+ } -+ } -+ int eq(Node *other) { -+ CharSetNode *o = dynamic_cast(other); -+ if (!o || chars.size() != o->chars.size()) -+ return 0; -+ -+ for (Chars::iterator i = chars.begin(), j = o->chars.begin(); -+ i != chars.end() && j != o->chars.end(); -+ i++, j++) { -+ if (*i != *j) -+ return 0; -+ } -+ return 1; -+ } -+ ostream& dump(ostream& os) -+ { -+ os << '['; -+ for (Chars::iterator i = chars.begin(); i != chars.end(); i++) -+ os << *i; -+ return os << ']'; -+ } -+ -+ Chars chars; -+ }; -+ -+ /* Match all except one character (/[^abc]/). 
*/ -+ class NotCharSetNode : public CNode { -+ public: -+ NotCharSetNode(Chars& chars) : chars(chars) { } -+ void follow(NodeCases& cases) -+ { -+ if (!cases.otherwise) -+ cases.otherwise = new NodeSet; -+ for (Chars::iterator j = chars.begin(); j != chars.end(); j++) { -+ NodeSet **x = &cases.cases[*j]; -+ if (!*x) -+ *x = new NodeSet(*cases.otherwise); -+ } -+ /** -+ * Note: Add to the nonmatching characters after copying away the -+ * old otherwise state for the matching characters. -+ */ -+ cases.otherwise->insert(followpos.begin(), followpos.end()); -+ for (NodeCases::iterator i = cases.begin(); i != cases.end(); i++) { -+ if (chars.find(i->first) == chars.end()) -+ i->second->insert(followpos.begin(), followpos.end()); -+ } -+ } -+ int eq(Node *other) { -+ NotCharSetNode *o = dynamic_cast(other); -+ if (!o || chars.size() != o->chars.size()) -+ return 0; -+ -+ for (Chars::iterator i = chars.begin(), j = o->chars.begin(); -+ i != chars.end() && j != o->chars.end(); -+ i++, j++) { -+ if (*i != *j) -+ return 0; -+ } -+ return 1; -+ } -+ ostream& dump(ostream& os) -+ { -+ os << "[^"; -+ for (Chars::iterator i = chars.begin(); i != chars.end(); i++) -+ os << *i; -+ return os << ']'; -+ } -+ -+ Chars chars; -+ }; -+ -+ /* Match any character (/./). */ -+ class AnyCharNode : public CNode { -+ public: -+ AnyCharNode() { } -+ void follow(NodeCases& cases) -+ { -+ if (!cases.otherwise) -+ cases.otherwise = new NodeSet; -+ cases.otherwise->insert(followpos.begin(), followpos.end()); -+ for (NodeCases::iterator i = cases.begin(); i != cases.end(); i++) -+ i->second->insert(followpos.begin(), followpos.end()); -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) -+ return 1; -+ return 0; -+ } -+ ostream& dump(ostream& os) { -+ return os << "."; -+ } -+ }; -+ -+ /** -+ * Indicate that a regular expression matches. An AcceptNode itself -+ * doesn't match anything, so it will never generate any transitions. -+ */ -+ class AcceptNode : public ImportantNode { -+ public: -+ AcceptNode() {} -+ void release(void) -+ { -+ /* don't delete AcceptNode via release as they are shared, -+ * and will be deleted when the table the are stored in is deleted -+ */ -+ } -+ -+ void follow(NodeCases& cases __attribute__((unused))) -+ { -+ /* Nothing to follow. */ -+ } -+ /* requires accept nodes to be common by pointer */ -+ int eq(Node *other) { -+ if (dynamic_cast(other)) -+ return (this == other); -+ return 0; -+ } -+ }; -+ -+ /* Match a node zero or more times. (This is a unary operator.) */ -+ class StarNode : public OneChildNode { -+ public: -+ StarNode(Node *left) : -+ OneChildNode(left) -+ { -+ nullable = true; -+ } -+ void compute_firstpos() -+ { -+ firstpos = child[0]->firstpos; -+ } -+ void compute_lastpos() -+ { -+ lastpos = child[0]->lastpos; -+ } -+ void compute_followpos() -+ { -+ NodeSet from = child[0]->lastpos, to = child[0]->firstpos; -+ for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -+ (*i)->followpos.insert(to.begin(), to.end()); -+ } -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) -+ return child[0]->eq(other->child[0]); -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ os << '('; -+ child[0]->dump(os); -+ return os << ")*"; -+ } -+ }; -+ -+ /* Match a node one or more times. (This is a unary operator.) 
*/ -+ class PlusNode : public OneChildNode { -+ public: -+ PlusNode(Node *left) : -+ OneChildNode(left) { } -+ void compute_nullable() -+ { -+ nullable = child[0]->nullable; -+ } -+ void compute_firstpos() -+ { -+ firstpos = child[0]->firstpos; -+ } -+ void compute_lastpos() -+ { -+ lastpos = child[0]->lastpos; -+ } -+ void compute_followpos() -+ { -+ NodeSet from = child[0]->lastpos, to = child[0]->firstpos; -+ for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -+ (*i)->followpos.insert(to.begin(), to.end()); -+ } -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) -+ return child[0]->eq(other->child[0]); -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ os << '('; -+ child[0]->dump(os); -+ return os << ")+"; -+ } -+ }; -+ -+ /* Match a pair of consecutive nodes. */ -+ class CatNode : public TwoChildNode { -+ public: -+ CatNode(Node *left, Node *right) : -+ TwoChildNode(left, right) { } -+ void compute_nullable() -+ { -+ nullable = child[0]->nullable && child[1]->nullable; -+ } -+ void compute_firstpos() -+ { -+ if (child[0]->nullable) -+ firstpos = child[0]->firstpos + child[1]->firstpos; -+ else -+ firstpos = child[0]->firstpos; -+ } -+ void compute_lastpos() -+ { -+ if (child[1]->nullable) -+ lastpos = child[0]->lastpos + child[1]->lastpos; -+ else -+ lastpos = child[1]->lastpos; -+ } -+ void compute_followpos() -+ { -+ NodeSet from = child[0]->lastpos, to = child[1]->firstpos; -+ for(NodeSet::iterator i = from.begin(); i != from.end(); i++) { -+ (*i)->followpos.insert(to.begin(), to.end()); -+ } -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) { -+ if (!child[0]->eq(other->child[0])) -+ return 0; -+ return child[1]->eq(other->child[1]); -+ } -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ child[0]->dump(os); -+ child[1]->dump(os); -+ return os; -+ //return os << ' '; -+ } -+ }; -+ -+ /* Match one of two alternative nodes. */ -+ class AltNode : public TwoChildNode { -+ public: -+ AltNode(Node *left, Node *right) : -+ TwoChildNode(left, right) { } -+ void compute_nullable() -+ { -+ nullable = child[0]->nullable || child[1]->nullable; -+ } -+ void compute_lastpos() -+ { -+ lastpos = child[0]->lastpos + child[1]->lastpos; -+ } -+ void compute_firstpos() -+ { -+ firstpos = child[0]->firstpos + child[1]->firstpos; -+ } -+ int eq(Node *other) { -+ if (dynamic_cast(other)) { -+ if (!child[0]->eq(other->child[0])) -+ return 0; -+ return child[1]->eq(other->child[1]); -+ } -+ return 0; -+ } -+ ostream& dump(ostream& os) -+ { -+ os << '('; -+ child[0]->dump(os); -+ os << '|'; -+ child[1]->dump(os); -+ os << ')'; -+ return os; -+ } -+ }; -+ -+/* Use a single static EpsNode as it carries no node specific information */ -+static EpsNode epsnode; -+ -+/* -+ * Normalize the regex parse tree for factoring and cancelations. Normalization -+ * reorganizes internal (alt and cat) nodes into a fixed "normalized" form that -+ * simplifies factoring code, in that it produces a canonicalized form for -+ * the direction being normalized so that the factoring code does not have -+ * to consider as many cases. -+ * -+ * left normalization (dir == 0) uses these rules -+ * (E | a) -> (a | E) -+ * (a | b) | c -> a | (b | c) -+ * (ab)c -> a(bc) -+ * -+ * right normalization (dir == 1) uses the same rules but reversed -+ * (a | E) -> (E | a) -+ * a | (b | c) -> (a | b) | c -+ * a(bc) -> (ab)c -+ * -+ * Note: This is written iteratively for a given node (the top node stays -+ * fixed and the children are rotated) instead of recursively. 
-+ * For a given node under examination rotate over nodes from -+ * dir to !dir. Until no dir direction node meets the criterial. -+ * Then recurse to the children (which will have a different node type) -+ * to make sure they are normalized. -+ * Normalization of a child node is guarenteed to not affect the -+ * normalization of the parent. -+ * -+ * For cat nodes the depth first traverse order is guarenteed to be -+ * maintained. This is not necessary for altnodes. -+ * -+ * Eg. For left normalization -+ * -+ * |1 |1 -+ * / \ / \ -+ * |2 T -> a |2 -+ * / \ / \ -+ * |3 c b |3 -+ * / \ / \ -+ * a b c T -+ * -+ */ -+static void rotate_node(Node *t, int dir) { -+ // (a | b) | c -> a | (b | c) -+ // (ab)c -> a(bc) -+ Node *left = t->child[dir]; -+ t->child[dir] = left->child[dir]; -+ left->child[dir] = left->child[!dir]; -+ left->child[!dir] = t->child[!dir]; -+ t->child[!dir] = left; -+} -+ -+void normalize_tree(Node *t, int dir) -+{ -+ if (dynamic_cast(t)) -+ return; -+ -+ for (;;) { -+ if ((&epsnode == t->child[dir]) && -+ (&epsnode != t->child[!dir]) && -+ dynamic_cast(t)) { -+ // (E | a) -> (a | E) -+ // Ea -> aE -+ Node *c = t->child[dir]; -+ t->child[dir] = t->child[!dir]; -+ t->child[!dir] = c; -+ // Don't break here as 'a' may be a tree that -+ // can be pulled up. -+ } else if ((dynamic_cast(t) && -+ dynamic_cast(t->child[dir])) || -+ (dynamic_cast(t) && -+ dynamic_cast(t->child[dir]))) { -+ // (a | b) | c -> a | (b | c) -+ // (ab)c -> a(bc) -+ rotate_node(t, dir); -+ } else if (dynamic_cast(t) && -+ dynamic_cast(t->child[dir]) && -+ dynamic_cast(t->child[!dir])) { -+ // [a] | b -> b | [a] -+ Node *c = t->child[dir]; -+ t->child[dir] = t->child[!dir]; -+ t->child[!dir] = c; -+ } else { -+ break; -+ } -+ } -+ if (t->child[dir]) -+ normalize_tree(t->child[dir], dir); -+ if (t->child[!dir]) -+ normalize_tree(t->child[!dir], dir); -+} -+ -+//charset conversion is disabled for now, -+//it hinders tree optimization in some cases, so it need to be either -+//done post optimization, or have extra factoring rules added -+#if 0 -+static Node *merge_charset(Node *a, Node *b) -+{ -+ if (dynamic_cast(a) && -+ dynamic_cast(b)) { -+ Chars chars; -+ chars.insert(dynamic_cast(a)->c); -+ chars.insert(dynamic_cast(b)->c); -+ CharSetNode *n = new CharSetNode(chars); -+ return n; -+ } else if (dynamic_cast(a) && -+ dynamic_cast(b)) { -+ Chars *chars = &dynamic_cast(b)->chars; -+ chars->insert(dynamic_cast(a)->c); -+ return b; -+ } else if (dynamic_cast(a) && -+ dynamic_cast(b)) { -+ Chars *from = &dynamic_cast(a)->chars; -+ Chars *to = &dynamic_cast(b)->chars; -+ for (Chars::iterator i = from->begin(); i != from->end(); i++) -+ to->insert(*i); -+ return b; -+ } -+ -+ //return ???; -+} -+ -+static Node *alt_to_charsets(Node *t, int dir) -+{ -+/* -+ Node *first = NULL; -+ Node *p = t; -+ Node *i = t; -+ for (;dynamic_cast(i);) { -+ if (dynamic_cast(i->child[dir]) || -+ dynamic_cast(i->child[dir])) { -+ if (!first) { -+ first = i; -+ p = i; -+ i = i->child[!dir]; -+ } else { -+ first->child[dir] = merge_charset(first->child[dir], -+ i->child[dir]); -+ p->child[!dir] = i->child[!dir]; -+ Node *tmp = i; -+ i = tmp->child[!dir]; -+ tmp->child[!dir] = NULL; -+ tmp->release(); -+ } -+ } else { -+ p = i; -+ i = i->child[!dir]; -+ } -+ } -+ // last altnode of chain check other dir as well -+ if (first && (dynamic_cast(i) || -+ dynamic_cast(i))) { -+ -+ } -+*/ -+ -+/* -+ if (dynamic_cast(t->child[dir]) || -+ dynamic_cast(t->child[dir])) -+ char_test = true; -+ (char_test && -+ (dynamic_cast(i->child[dir]) || -+ 
dynamic_cast(i->child[dir])))) { -+*/ -+ return t; -+} -+#endif -+ -+static Node *basic_alt_factor(Node *t, int dir) -+{ -+ if (!dynamic_cast(t)) -+ return t; -+ -+ if (t->child[dir]->eq(t->child[!dir])) { -+ // (a | a) -> a -+ Node *tmp = t->child[dir]; -+ t->child[dir] = NULL; -+ t->release(); -+ return tmp; -+ } -+ -+ // (ab) | (ac) -> a(b|c) -+ if (dynamic_cast(t->child[dir]) && -+ dynamic_cast(t->child[!dir]) && -+ t->child[dir]->child[dir]->eq(t->child[!dir]->child[dir])) { -+ // (ab) | (ac) -> a(b|c) -+ Node *left = t->child[dir]; -+ Node *right = t->child[!dir]; -+ t->child[dir] = left->child[!dir]; -+ t->child[!dir] = right->child[!dir]; -+ right->child[!dir] = NULL; -+ right->release(); -+ left->child[!dir] = t; -+ return left; -+ } -+ -+ // a | (ab) -> a (E | b) -> a (b | E) -+ if (dynamic_cast(t->child[!dir]) && -+ t->child[dir]->eq(t->child[!dir]->child[dir])) { -+ Node *c = t->child[!dir]; -+ t->child[dir]->release(); -+ t->child[dir] = c->child[!dir]; -+ t->child[!dir] = &epsnode; -+ c->child[!dir] = t; -+ return c; -+ } -+ -+ // ab | (a) -> a (b | E) -+ if (dynamic_cast(t->child[dir]) && -+ t->child[dir]->child[dir]->eq(t->child[!dir])) { -+ Node *c = t->child[dir]; -+ t->child[!dir]->release(); -+ t->child[dir] = c->child[!dir]; -+ t->child[!dir] = &epsnode; -+ c->child[!dir] = t; -+ return c; -+ } -+ -+ return t; -+} -+ -+static Node *basic_simplify(Node *t, int dir) -+{ -+ if (dynamic_cast(t) && -+ &epsnode == t->child[!dir]) { -+ // aE -> a -+ Node *tmp = t->child[dir]; -+ t->child[dir] = NULL; -+ t->release(); -+ return tmp; -+ } -+ -+ return basic_alt_factor(t, dir); -+} -+ -+/* -+ * assumes a normalized tree. reductions shown for left normalization -+ * aE -> a -+ * (a | a) -> a -+ ** factoring patterns -+ * a | (a | b) -> (a | b) -+ * a | (ab) -> a (E | b) -> a (b | E) -+ * (ab) | (ac) -> a(b|c) -+ * -+ * returns t - if no simplifications were made -+ * a new root node - if simplifications were made -+ */ -+Node *simplify_tree_base(Node *t, int dir, bool &mod) -+{ -+ if (dynamic_cast(t)) -+ return t; -+ -+ for (int i=0; i < 2; i++) { -+ if (t->child[i]) { -+ Node *c = simplify_tree_base(t->child[i], dir, mod); -+ if (c != t->child[i]) { -+ t->child[i] = c; -+ mod = true; -+ } -+ } -+ } -+ -+ // only iterate on loop if modification made -+ for (;; mod = true) { -+ -+ Node *tmp = basic_simplify(t, dir); -+ if (tmp != t) { -+ t = tmp; -+ continue; -+ } -+ -+ -+ /* all tests after this must meet 2 alt node condition */ -+ if (!dynamic_cast(t) || -+ !dynamic_cast(t->child[!dir])) -+ break; -+ -+ // a | (a | b) -> (a | b) -+ // a | (b | (c | a)) -> (b | (c | a)) -+ Node *p = t; -+ Node *i = t->child[!dir]; -+ for (;dynamic_cast(i); p = i, i = i->child[!dir]) { -+ if (t->child[dir]->eq(i->child[dir])) { -+ Node *tmp = t->child[!dir]; -+ t->child[!dir] = NULL; -+ t->release(); -+ t = tmp; -+ continue; -+ } -+ } -+ // last altnode of chain check other dir as well -+ if (t->child[dir]->eq(p->child[!dir])) { -+ Node *tmp = t->child[!dir]; -+ t->child[!dir] = NULL; -+ t->release(); -+ t = tmp; -+ continue; -+ } -+ -+ //exact match didn't work, try factoring front -+ //a | (ac | (ad | () -> (a (E | c)) | (...) -+ //ab | (ac | (...)) -> (a (b | c)) | (...) -+ //ab | (a | (...)) -> (a (b | E)) | (...) 
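basic_alt_factor() above applies these rules structurally on the tree; as a rough illustration of what the (ab)|(ac) -> a(b|c) rule buys, the sketch below factors the common prefix out of two literal alternatives written as plain strings. The paths are made-up examples, and an empty branch in the output corresponds to the epsilon node in the real tree.

#include <iostream>
#include <string>

// Factor the longest common prefix out of two literal alternatives.
static std::string factor_alternatives(const std::string &x, const std::string &y)
{
    std::string::size_type n = 0;
    while (n < x.size() && n < y.size() && x[n] == y[n])
        n++;
    std::string prefix = x.substr(0, n);
    std::string xs = x.substr(n), ys = y.substr(n);
    if (xs.empty() && ys.empty())
        return prefix;                            // (a|a) -> a
    return prefix + "(" + xs + "|" + ys + ")";    // (ab)|(ac) -> a(b|c)
}

int main()
{
    // Two rules sharing a long prefix collapse into one branch point, which
    // keeps the DFA from duplicating states for the shared part of the match.
    std::cout << factor_alternatives("/usr/bin/cat", "/usr/bin/cp") << '\n';
    // prints /usr/bin/c(at|p)
    std::cout << factor_alternatives("abc", "ab") << '\n';
    // prints ab(c|), the string-level analogue of a(b|E)
}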
-+ Node *pp; -+ int count = 0; -+ Node *subject = t->child[dir]; -+ Node *a = subject; -+ if (dynamic_cast(subject)) -+ a = subject->child[dir]; -+ -+ for (pp = p = t, i = t->child[!dir]; -+ dynamic_cast(i); ) { -+ if ((dynamic_cast(i->child[dir]) && -+ a->eq(i->child[dir]->child[dir])) || -+ (a->eq(i->child[dir]))) { -+ // extract matching alt node -+ p->child[!dir] = i->child[!dir]; -+ i->child[!dir] = subject; -+ subject = basic_simplify(i, dir); -+ if (dynamic_cast(subject)) -+ a = subject->child[dir]; -+ else -+ a = subject; -+ -+ i = p->child[!dir]; -+ count++; -+ } else { -+ pp = p; p = i; i = i->child[!dir]; -+ } -+ } -+ -+ // last altnode in chain check other dir as well -+ if ((dynamic_cast(i) && -+ a->eq(i->child[dir])) || -+ (a->eq(i))) { -+ count++; -+ if (t == p) { -+ t->child[dir] = subject; -+ t = basic_simplify(t, dir); -+ } else { -+ t->child[dir] = p->child[dir]; -+ p->child[dir] = subject; -+ pp->child[!dir] = basic_simplify(p, dir); -+ } -+ } else { -+ t->child[dir] = i; -+ p->child[!dir] = subject; -+ } -+ -+ if (count == 0) -+ break; -+ } -+ return t; -+} -+ -+int debug_tree(Node *t) -+{ -+ int nodes = 1; -+ -+ if (!dynamic_cast(t)) { -+ if (t->child[0]) -+ nodes += debug_tree(t->child[0]); -+ if (t->child[1]) -+ nodes += debug_tree(t->child[1]); -+ } -+ return nodes; -+} -+ -+struct node_counts { -+ int charnode; -+ int charset; -+ int notcharset; -+ int alt; -+ int plus; -+ int star; -+ int any; -+ int cat; -+}; -+ -+ -+static void count_tree_nodes(Node *t, struct node_counts *counts) -+{ -+ if (dynamic_cast(t)) { -+ counts->alt++; -+ count_tree_nodes(t->child[0], counts); -+ count_tree_nodes(t->child[1], counts); -+ } else if (dynamic_cast(t)) { -+ counts->cat++; -+ count_tree_nodes(t->child[0], counts); -+ count_tree_nodes(t->child[1], counts); -+ } else if (dynamic_cast(t)) { -+ counts->plus++; -+ count_tree_nodes(t->child[0], counts); -+ } else if (dynamic_cast(t)) { -+ counts->star++; -+ count_tree_nodes(t->child[0], counts); -+ } else if (dynamic_cast(t)) { -+ counts->charnode++; -+ } else if (dynamic_cast(t)) { -+ counts->any++; -+ } else if (dynamic_cast(t)) { -+ counts->charset++; -+ } else if (dynamic_cast(t)) { -+ counts->notcharset++; -+ } -+} -+ -+#include "stdio.h" -+#include "stdint.h" -+#include "apparmor_re.h" -+ -+Node *simplify_tree(Node *t, dfaflags_t flags) -+{ -+ bool update; -+ -+ if (flags & DFA_DUMP_TREE_STATS) { -+ struct node_counts counts = { 0, 0, 0, 0, 0, 0, 0, 0 }; -+ count_tree_nodes(t, &counts); -+ fprintf(stderr, "expr tree: c %d, [] %d, [^] %d, | %d, + %d, * %d, . %d, cat %d\n", counts.charnode, counts.charset, counts.notcharset, counts.alt, counts.plus, counts.star, counts.any, counts.cat); -+ } -+ do { -+ update = false; -+ //default to right normalize first as this reduces the number -+ //of trailing nodes which might follow an internal * -+ //or **, which is where state explosion can happen -+ //eg. 
in one test this makes the difference between -+ // the dfa having about 7 thousands states, -+ // and it having about 1.25 million states -+ int dir = 1; -+ if (flags & DFA_CONTROL_TREE_LEFT) -+ dir = 0; -+ for (int count = 0; count < 2; count++) { -+ bool modified; -+ do { -+ modified = false; -+ if (flags & DFA_CONTROL_TREE_NORMAL) -+ normalize_tree(t, dir); -+ t = simplify_tree_base(t, dir, modified); -+ if (modified) -+ update = true; -+ } while (modified); -+ if (flags & DFA_CONTROL_TREE_LEFT) -+ dir++; -+ else -+ dir--; -+ } -+ } while(update); -+ if (flags & DFA_DUMP_TREE_STATS) { -+ struct node_counts counts = { 0, 0, 0, 0, 0, 0, 0, 0 }; -+ count_tree_nodes(t, &counts); -+ fprintf(stderr, "simplified expr tree: c %d, [] %d, [^] %d, | %d, + %d, * %d, . %d, cat %d\n", counts.charnode, counts.charset, counts.notcharset, counts.alt, counts.plus, counts.star, counts.any, counts.cat); -+ } -+ return t; -+} -+ -+ -+%} -+ -+%union { -+ char c; -+ Node *node; -+ Chars *cset; -+} -+ -+%{ -+ void regexp_error(Node **, const char *, const char *); -+# define YYLEX_PARAM &text -+ int regexp_lex(YYSTYPE *, const char **); -+ -+ static inline Chars* -+ insert_char(Chars* cset, uchar a) -+ { -+ cset->insert(a); -+ return cset; -+ } -+ -+ static inline Chars* -+ insert_char_range(Chars* cset, uchar a, uchar b) -+ { -+ if (a > b) -+ swap(a, b); -+ for (uchar i = a; i <= b; i++) -+ cset->insert(i); -+ return cset; -+ } -+%} -+ -+%pure-parser -+/* %error-verbose */ -+%parse-param {Node **root} -+%parse-param {const char *text} -+%name-prefix = "regexp_" -+ -+%token CHAR -+%type regex_char cset_char1 cset_char cset_charN -+%type charset cset_chars -+%type regexp expr terms0 terms qterm term -+ -+/** -+ * Note: destroy all nodes upon failure, but *not* the start symbol once -+ * parsing succeeds! -+ */ -+%destructor { $$->release(); } expr terms0 terms qterm term -+ -+%% -+ -+/* FIXME: Does not parse "[--]", "[---]", "[^^-x]". I don't actually know -+ which precise grammer Perl regexps use, and rediscovering that -+ is proving to be painful. */ -+ -+regexp : /* empty */ { *root = $$ = &epsnode; } -+ | expr { *root = $$ = $1; } -+ ; -+ -+expr : terms -+ | expr '|' terms0 { $$ = new AltNode($1, $3); } -+ | '|' terms0 { $$ = new AltNode(&epsnode, $2); } -+ ; -+ -+terms0 : /* empty */ { $$ = &epsnode; } -+ | terms -+ ; -+ -+terms : qterm -+ | terms qterm { $$ = new CatNode($1, $2); } -+ ; -+ -+qterm : term -+ | term '*' { $$ = new StarNode($1); } -+ | term '+' { $$ = new PlusNode($1); } -+ ; -+ -+term : '.' 
{ $$ = new AnyCharNode; } -+ | regex_char { $$ = new CharNode($1); } -+ | '[' charset ']' { $$ = new CharSetNode(*$2); -+ delete $2; } -+ | '[' '^' charset ']' -+ { $$ = new NotCharSetNode(*$3); -+ delete $3; } -+ | '[' '^' '^' cset_chars ']' -+ { $4->insert('^'); -+ $$ = new NotCharSetNode(*$4); -+ delete $4; } -+ | '(' regexp ')' { $$ = $2; } -+ ; -+ -+regex_char : CHAR -+ | '^' { $$ = '^'; } -+ | '-' { $$ = '-'; } -+ | ']' { $$ = ']'; } -+ ; -+ -+charset : cset_char1 cset_chars -+ { $$ = insert_char($2, $1); } -+ | cset_char1 '-' cset_charN cset_chars -+ { $$ = insert_char_range($4, $1, $3); } -+ ; -+ -+cset_chars : /* nothing */ { $$ = new Chars; } -+ | cset_chars cset_charN -+ { $$ = insert_char($1, $2); } -+ | cset_chars cset_charN '-' cset_charN -+ { $$ = insert_char_range($1, $2, $4); } -+ ; -+ -+cset_char1 : cset_char -+ | ']' { $$ = ']'; } -+ | '-' { $$ = '-'; } -+ ; -+ -+cset_charN : cset_char -+ | '^' { $$ = '^'; } -+ ; -+ -+cset_char : CHAR -+ | '[' { $$ = '['; } -+ | '*' { $$ = '*'; } -+ | '+' { $$ = '+'; } -+ | '.' { $$ = '.'; } -+ | '|' { $$ = '|'; } -+ | '(' { $$ = '('; } -+ | ')' { $$ = ')'; } -+ ; -+ -+%% -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include "../immunix.h" -+ -+/* Traverse the syntax tree depth-first in an iterator-like manner. */ -+class depth_first_traversal { -+ stack pos; -+ void push_left(Node *node) -+ { -+ pos.push(node); -+ -+ while (dynamic_cast(node)) { -+ pos.push(node->child[0]); -+ node = node->child[0]; -+ } -+ } -+ -+public: -+ depth_first_traversal(Node *node) { -+ push_left(node); -+ } -+ Node *operator*() -+ { -+ return pos.top(); -+ } -+ Node* operator->() -+ { -+ return pos.top(); -+ } -+ operator bool() -+ { -+ return !pos.empty(); -+ } -+ void operator++(int) -+ { -+ Node *last = pos.top(); -+ pos.pop(); -+ -+ if (!pos.empty()) { -+ /* no need to dynamic cast, as we just popped a node so the top node -+ * must be an inner node */ -+ InnerNode *node = (InnerNode *)(pos.top()); -+ -+ if (node->child[1] && node->child[1] != last) { -+ push_left(node->child[1]); -+ } -+ } -+ } -+}; -+ -+ostream& operator<<(ostream& os, Node& node) -+{ -+ node.dump(os); -+ return os; -+} -+ -+ostream& operator<<(ostream& os, uchar c) -+{ -+ const char *search = "\a\033\f\n\r\t|*+[](). ", -+ *replace = "aefnrt|*+[](). 
", *s; -+ -+ if ((s = strchr(search, c)) && *s != '\0') -+ os << '\\' << replace[s - search]; -+ else if (c < 32 || c >= 127) -+ os << '\\' << '0' << char('0' + (c >> 6)) -+ << char('0' + ((c >> 3) & 7)) << char('0' + (c & 7)); -+ else -+ os << (char)c; -+ return os; -+} -+ -+int -+octdigit(char c) -+{ -+ if (c >= '0' && c <= '7') -+ return c - '0'; -+ return -1; -+} -+ -+int -+hexdigit(char c) -+{ -+ if (c >= '0' && c <= '9') -+ return c - '0'; -+ else if (c >= 'A' && c <= 'F') -+ return 10 + c - 'A'; -+ else if (c >= 'a' && c <= 'f') -+ return 10 + c - 'A'; -+ else -+ return -1; -+} -+ -+int -+regexp_lex(YYSTYPE *val, const char **pos) -+{ -+ int c; -+ -+ val->c = **pos; -+ switch(*(*pos)++) { -+ case '\0': -+ (*pos)--; -+ return 0; -+ -+ case '*': case '+': case '.': case '|': case '^': case '-': -+ case '[': case ']': case '(' : case ')': -+ return *(*pos - 1); -+ -+ case '\\': -+ val->c = **pos; -+ switch(*(*pos)++) { -+ case '\0': -+ (*pos)--; -+ /* fall through */ -+ case '\\': -+ val->c = '\\'; -+ break; -+ -+ case '0': -+ val->c = 0; -+ if ((c = octdigit(**pos)) >= 0) { -+ val->c = c; -+ (*pos)++; -+ } -+ if ((c = octdigit(**pos)) >= 0) { -+ val->c = (val->c << 3) + c; -+ (*pos)++; -+ } -+ if ((c = octdigit(**pos)) >= 0) { -+ val->c = (val->c << 3) + c; -+ (*pos)++; -+ } -+ break; -+ -+ case 'x': -+ val->c = 0; -+ if ((c = hexdigit(**pos)) >= 0) { -+ val->c = c; -+ (*pos)++; -+ } -+ if ((c = hexdigit(**pos)) >= 0) { -+ val->c = (val->c << 4) + c; -+ (*pos)++; -+ } -+ break; -+ -+ case 'a': -+ val->c = '\a'; -+ break; -+ -+ case 'e': -+ val->c = 033 /* ESC */; -+ break; -+ -+ case 'f': -+ val->c = '\f'; -+ break; -+ -+ case 'n': -+ val->c = '\n'; -+ break; -+ -+ case 'r': -+ val->c = '\r'; -+ break; -+ -+ case 't': -+ val->c = '\t'; -+ break; -+ } -+ } -+ return CHAR; -+} -+ -+void -+regexp_error(Node ** __attribute__((unused)), -+ const char *text __attribute__((unused)), -+ const char *error __attribute__((unused))) -+{ -+ /* We don't want the library to print error messages. */ -+} -+ -+/** -+ * Assign a consecutive number to each node. This is only needed for -+ * pretty-printing the debug output. -+ * -+ * The epsnode is labeled 0. Start labeling at 1 -+ */ -+void label_nodes(Node *root) -+{ -+ int nodes = 1; -+ for (depth_first_traversal i(root); i; i++) -+ i->label = nodes++; -+} -+ -+/** -+ * Text-dump a state (for debugging). -+ */ -+ostream& operator<<(ostream& os, const NodeSet& state) -+{ -+ os << '{'; -+ if (!state.empty()) { -+ NodeSet::iterator i = state.begin(); -+ for(;;) { -+ os << (*i)->label; -+ if (++i == state.end()) -+ break; -+ os << ','; -+ } -+ } -+ os << '}'; -+ return os; -+} -+ -+/** -+ * Text-dump the syntax tree (for debugging). -+ */ -+void dump_syntax_tree(ostream& os, Node *node) { -+ for (depth_first_traversal i(node); i; i++) { -+ os << i->label << '\t'; -+ if ((*i)->child[0] == 0) -+ os << **i << '\t' << (*i)->followpos << endl; -+ else { -+ if ((*i)->child[1] == 0) -+ os << (*i)->child[0]->label << **i; -+ else -+ os << (*i)->child[0]->label << **i -+ << (*i)->child[1]->label; -+ os << '\t' << (*i)->firstpos -+ << (*i)->lastpos << endl; -+ } -+ } -+ os << endl; -+} -+ -+/* Comparison operator for sets of . -+ * Compare set hashes, and if the sets have the same hash -+ * do compare pointer comparison on set of , the pointer comparison -+ * allows us to determine which Sets of we have seen already from -+ * new ones when constructing the DFA. 
-+ */ -+struct deref_less_than { -+ bool operator()(pair const & lhs, pair const & rhs) const -+ { -+ if (lhs.first == rhs.first) -+ return *(lhs.second) < *(rhs.second); -+ else -+ return lhs.first < rhs.first; -+ } -+}; -+ -+unsigned long hash_NodeSet(const NodeSet *ns) -+{ -+ unsigned long hash = 5381; -+ -+ for (NodeSet::iterator i = ns->begin(); i != ns->end(); i++) { -+ hash = ((hash << 5) + hash) + (unsigned long) *i; -+ } -+ -+ return hash; -+} -+ -+class State; -+/** -+ * State cases are identical to NodesCases except they map to State * -+ * instead of NodeSet. -+ * Out-edges from a state to another: we store the follow State -+ * for each input character that is not a default match in cases and -+ * default matches in otherwise as well as in all matching explicit cases -+ * This avoids enumerating all the explicit tranitions for default matches. -+ */ -+typedef struct Cases { -+ typedef map::iterator iterator; -+ iterator begin() { return cases.begin(); } -+ iterator end() { return cases.end(); } -+ -+ Cases() : otherwise(0) { } -+ map cases; -+ State *otherwise; -+} Cases; -+ -+typedef list Partition; -+ -+uint32_t accept_perms(NodeSet *state, uint32_t *audit_ctl, int *error); -+ -+/* -+ * State - DFA individual state information -+ * label: a unique label to identify the state used for pretty printing -+ * the non-matching state is setup to have label == 0 and -+ * the start state is setup to have label == 1 -+ * audit: the audit permission mask for the state -+ * accept: the accept permissions for the state -+ * cases: set of transitions from this state -+ * parition: Is a temporary work variable used during dfa minimization. -+ * it can be replaced with a map, but that is slower and uses more -+ * memory. -+ * nodes: Is a temporary work variable used during dfa creation. It can -+ * be replaced by using the nodemap, but that is slower -+ */ -+class State { -+public: -+ State() : label (0), audit(0), accept(0), cases(), nodes(NULL) { }; -+ State(int l): label (l), audit(0), accept(0), cases(), nodes(NULL) { }; -+ State(int l, NodeSet *n) throw (int): -+ label(l), audit(0), accept(0), cases(), nodes(n) -+ { -+ int error; -+ -+ /* Compute permissions associated with the State. */ -+ accept = accept_perms(nodes, &audit, &error); -+ if (error) { -+cerr << "Failing on accept perms " << error << "\n"; -+ throw error; -+ } -+ }; -+ -+ int label; -+ uint32_t audit, accept; -+ Cases cases; -+ union { -+ Partition *partition; -+ NodeSet *nodes; -+ }; -+}; -+ -+ostream& operator<<(ostream& os, const State& state) -+{ -+ /* dump the state label */ -+ os << '{'; -+ os << state.label; -+ os << '}'; -+ return os; -+} -+ -+typedef map, State *, deref_less_than > NodeMap; -+/* Transitions in the DFA. */ -+ -+/* dfa_stats - structure to group various stats about dfa creation -+ * duplicates - how many duplicate NodeSets where encountered and discarded -+ * proto_max - maximum length of a NodeSet encountered during dfa construction -+ * proto_sum - sum of NodeSet length during dfa construction. Used to find -+ * average length. 
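The NodeMap built from deref_less_than and hash_NodeSet() lets the construction ask "have I seen this exact set of nodes before?" without comparing every pair of sets: the cheap djb2 hash is compared first and the full set comparison only runs on hash ties. A minimal stand-alone sketch of the same pattern, with plain ints standing in for the Node pointers:

#include <iostream>
#include <map>
#include <set>
#include <utility>

typedef std::set<int> NodeSet;

// djb2 over the set's elements, same shape as hash_NodeSet().
static unsigned long hash_set(const NodeSet &s)
{
    unsigned long hash = 5381;
    for (int v : s)
        hash = ((hash << 5) + hash) + (unsigned long)v;
    return hash;
}

// Order keys by hash first; only equal hashes pay for a full set comparison.
struct set_less_than {
    bool operator()(const std::pair<unsigned long, const NodeSet *> &a,
                    const std::pair<unsigned long, const NodeSet *> &b) const
    {
        if (a.first == b.first)
            return *a.second < *b.second;
        return a.first < b.first;
    }
};

int main()
{
    NodeSet s1 = {1, 2, 3};
    NodeSet s2 = {1, 2, 3};   // same contents, different object
    std::map<std::pair<unsigned long, const NodeSet *>, int, set_less_than> seen;

    seen[std::make_pair(hash_set(s1), &s1)] = 0;
    bool duplicate = seen.count(std::make_pair(hash_set(s2), &s2)) != 0;
    std::cout << (duplicate ? "duplicate set: reuse the existing state\n"
                            : "new set: create a state and queue it\n");
}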
-+ */ -+typedef struct dfa_stats { -+ unsigned int duplicates, proto_max, proto_sum; -+} dfa_stats_t; -+ -+class DFA { -+ void dump_node_to_dfa(void); -+ State* add_new_state(NodeMap &nodemap, pair index, NodeSet *nodes, dfa_stats_t &stats); -+ void update_state_transitions(NodeMap &nodemap, list &work_queue, State *state, dfa_stats_t &stats); -+ State *find_target_state(NodeMap &nodemap, list &work_queue, -+ NodeSet *nodes, dfa_stats_t &stats); -+public: -+ DFA(Node *root, dfaflags_t flags); -+ virtual ~DFA(); -+ void remove_unreachable(dfaflags_t flags); -+ bool same_mappings(State *s1, State *s2); -+ size_t hash_trans(State *s); -+ void minimize(dfaflags_t flags); -+ void dump(ostream& os); -+ void dump_dot_graph(ostream& os); -+ void dump_uniq_perms(const char *s); -+ map equivalence_classes(dfaflags_t flags); -+ void apply_equivalence_classes(map& eq); -+ Node *root; -+ State *nonmatching, *start; -+ Partition states; -+}; -+ -+State* DFA::add_new_state(NodeMap &nodemap, pair index, NodeSet *nodes, dfa_stats_t &stats) -+{ -+ State *state = new State(nodemap.size(), nodes); -+ states.push_back(state); -+ nodemap.insert(make_pair(index, state)); -+ stats.proto_sum += nodes->size(); -+ if (nodes->size() > stats.proto_max) -+ stats.proto_max = nodes->size(); -+ return state; -+} -+ -+State *DFA::find_target_state(NodeMap &nodemap, list &work_queue, -+ NodeSet *nodes, dfa_stats_t &stats) -+{ -+ State *target; -+ -+ pair index = make_pair(hash_NodeSet(nodes), nodes); -+ -+ map, State *, deref_less_than>::iterator x = nodemap.find(index); -+ -+ if (x == nodemap.end()) { -+ /* set of nodes isn't known so create new state, and nodes to -+ * state mapping -+ */ -+ target = add_new_state(nodemap, index, nodes, stats); -+ work_queue.push_back(target); -+ } else { -+ /* set of nodes already has a mapping so free this one */ -+ stats.duplicates++; -+ delete (nodes); -+ target = x->second; -+ } -+ -+ return target; -+} -+ -+void DFA::update_state_transitions(NodeMap &nodemap, -+ list &work_queue, State *state, -+ dfa_stats_t &stats) -+{ -+ /* Compute possible transitions for state->nodes. This is done by -+ * iterating over all the nodes in state->nodes and combining the -+ * transitions. -+ * -+ * The resultant transition set is a mapping of characters to -+ * sets of nodes. -+ */ -+ NodeCases cases; -+ for (NodeSet::iterator i = state->nodes->begin(); i != state->nodes->end(); i++) -+ (*i)->follow(cases); -+ -+ /* Now for each set of nodes in the computed transitions, make -+ * sure that there is a state that maps to it, and add the -+ * matching case to the state. -+ */ -+ -+ /* check the default transition first */ -+ if (cases.otherwise) -+ state->cases.otherwise = find_target_state(nodemap, work_queue, -+ cases.otherwise, -+ stats);; -+ -+ /* For each transition from *from, check if the set of nodes it -+ * transitions to already has been mapped to a state -+ */ -+ for (NodeCases::iterator j = cases.begin(); j != cases.end(); j++) { -+ State *target; -+ target = find_target_state(nodemap, work_queue, j->second, -+ stats); -+ -+ /* Don't insert transition that the default transition -+ * already covers -+ */ -+ if (target != state->cases.otherwise) -+ state->cases.cases[j->first] = target; -+ } -+} -+ -+ -+/* WARNING: This routine can only be called from within DFA creation as -+ * the nodes value is only valid during dfa construction. 
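find_target_state() and update_state_transitions() above implement a find-or-create pattern driven by a work queue: any transition target that has not been seen yet becomes a new state and is queued so its own transitions get computed in turn. The sketch below shows just that driver loop on a toy successor function; the state names and transitions are invented for the example.

#include <iostream>
#include <list>
#include <map>
#include <string>
#include <vector>

// Toy stand-in for update_state_transitions(): the states reachable in one step.
static std::vector<std::string> successors(const std::string &s)
{
    if (s == "start") return {"a", "b"};
    if (s == "a")     return {"b", "accept"};
    if (s == "b")     return {"accept"};
    return {};
}

int main()
{
    std::map<std::string, int> label;    // states discovered so far
    std::list<std::string> work_queue;

    label["start"] = 0;
    work_queue.push_back("start");

    while (!work_queue.empty()) {
        std::string from = work_queue.front();
        work_queue.pop_front();
        for (const std::string &to : successors(from)) {
            if (!label.count(to)) {      // unseen target: create it and queue it
                int next = (int)label.size();
                label[to] = next;
                work_queue.push_back(to);
            }
        }
    }
    std::cout << "states discovered: " << label.size() << '\n';   // 4
}

Processing the queue front-first, as here, gives the breadth-first order that the constructor's TODO comment below discusses when weighing peak memory use against a depth-first alternative.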
-+ */ -+void DFA::dump_node_to_dfa(void) -+{ -+ cerr << "Mapping of States to expr nodes\n" -+ " State <= Nodes\n" -+ "-------------------\n"; -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) -+ cerr << " " << (*i)->label << " <= " << *(*i)->nodes << "\n"; -+} -+ -+/** -+ * Construct a DFA from a syntax tree. -+ */ -+DFA::DFA(Node *root, dfaflags_t flags) : root(root) -+{ -+ dfa_stats_t stats = { 0, 0, 0 }; -+ int i = 0; -+ -+ if (flags & DFA_DUMP_PROGRESS) -+ fprintf(stderr, "Creating dfa:\r"); -+ -+ for (depth_first_traversal i(root); i; i++) { -+ (*i)->compute_nullable(); -+ (*i)->compute_firstpos(); -+ (*i)->compute_lastpos(); -+ } -+ -+ if (flags & DFA_DUMP_PROGRESS) -+ fprintf(stderr, "Creating dfa: followpos\r"); -+ for (depth_first_traversal i(root); i; i++) { -+ (*i)->compute_followpos(); -+ } -+ -+ NodeMap nodemap; -+ NodeSet *emptynode = new NodeSet; -+ nonmatching = add_new_state(nodemap, -+ make_pair(hash_NodeSet(emptynode), emptynode), -+ emptynode, stats); -+ -+ NodeSet *first = new NodeSet(root->firstpos); -+ start = add_new_state(nodemap, make_pair(hash_NodeSet(first), first), -+ first, stats); -+ -+ /* the work_queue contains the states that need to have their -+ * transitions computed. This could be done with a recursive -+ * algorithm instead of a work_queue, but it would be slightly slower -+ * and consume more memory. -+ * -+ * TODO: currently the work_queue is treated in a breadth first -+ * search manner. Test using the work_queue in a depth first -+ * manner, this may help reduce the number of entries on the -+ * work_queue at any given time, thus reducing peak memory use. -+ */ -+ list work_queue; -+ work_queue.push_back(start); -+ -+ while (!work_queue.empty()) { -+ if (i % 1000 == 0 && (flags & DFA_DUMP_PROGRESS)) -+ fprintf(stderr, "\033[2KCreating dfa: queue %ld\tstates %ld\teliminated duplicates %d\r", work_queue.size(), states.size(), stats.duplicates); -+ i++; -+ -+ State *from = work_queue.front(); -+ work_queue.pop_front(); -+ -+ /* Update 'from's transitions, and if it transitions to any -+ * unknown State create it and add it to the work_queue -+ */ -+ update_state_transitions(nodemap, work_queue, from, stats); -+ -+ } /* for (NodeSet *nodes ... */ -+ -+ /* cleanup Sets of nodes used computing the DFA as they are no longer -+ * needed. 
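The followpos sets computed at the top of the constructor are what the subset construction walks. As a concrete illustration (the standard textbook example, not an AppArmor rule), number the leaves of (a|b)*abb from left to right as 1..5:

    firstpos(root)  = {1,2,3}    (this becomes the DFA start state)
    followpos(1)    = {1,2,3}    (back into the star, or on to the 'a' at 3)
    followpos(2)    = {1,2,3}
    followpos(3)    = {4}
    followpos(4)    = {5}
    followpos(5)    = {}         (end of the pattern; in this code an accept node sits here)

Each DFA state is such a set of positions, and the transition on a character from a state is the union of followpos(i) over the positions i in the state labelled with that character, which is what update_state_transitions() assembles through the per-character NodeCases.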
-+ */ -+ for (depth_first_traversal i(root); i; i++) { -+ (*i)->firstpos.clear(); -+ (*i)->lastpos.clear(); -+ (*i)->followpos.clear(); -+ } -+ -+ if (flags & DFA_DUMP_NODE_TO_DFA) -+ dump_node_to_dfa(); -+ -+ for (NodeMap::iterator i = nodemap.begin(); i != nodemap.end(); i++) -+ delete i->first.second; -+ nodemap.clear(); -+ -+ if (flags & (DFA_DUMP_STATS)) -+ fprintf(stderr, "\033[2KCreated dfa: states %ld,\teliminated duplicates %d,\tprotostate sets: longest %u, avg %u\n", states.size(), stats.duplicates, stats.proto_max, (unsigned int) (stats.proto_sum/states.size())); -+ -+} -+ -+ -+DFA::~DFA() -+{ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) -+ delete *i; -+} -+ -+class MatchFlag : public AcceptNode { -+public: -+MatchFlag(uint32_t flag, uint32_t audit) : flag(flag), audit(audit) {} -+ ostream& dump(ostream& os) -+ { -+ return os << '<' << flag << '>'; -+ } -+ -+ uint32_t flag; -+ uint32_t audit; -+ }; -+ -+class ExactMatchFlag : public MatchFlag { -+public: -+ ExactMatchFlag(uint32_t flag, uint32_t audit) : MatchFlag(flag, audit) {} -+}; -+ -+class DenyMatchFlag : public MatchFlag { -+public: -+ DenyMatchFlag(uint32_t flag, uint32_t quiet) : MatchFlag(flag, quiet) {} -+}; -+ -+ -+void DFA::dump_uniq_perms(const char *s) -+{ -+ set < pair > uniq; -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) -+ uniq.insert(make_pair((*i)->accept, (*i)->audit)); -+ -+ cerr << "Unique Permission sets: " << s << " (" << uniq.size() << ")\n"; -+ cerr << "----------------------\n"; -+ for (set< pair >::iterator i = uniq.begin(); -+ i != uniq.end(); i++) { -+ cerr << " " << hex << i->first << " " << i->second << dec <<"\n"; -+ } -+} -+ -+ -+/* Remove dead or unreachable states */ -+void DFA::remove_unreachable(dfaflags_t flags) -+{ -+ set reachable; -+ list work_queue; -+ -+ /* find the set of reachable states */ -+ reachable.insert(nonmatching); -+ work_queue.push_back(start); -+ while (!work_queue.empty()) { -+ State *from = work_queue.front(); -+ work_queue.pop_front(); -+ reachable.insert(from); -+ -+ if (from->cases.otherwise && -+ (reachable.find(from->cases.otherwise) == reachable.end())) -+ work_queue.push_back(from->cases.otherwise); -+ -+ for (Cases::iterator j = from->cases.begin(); -+ j != from->cases.end(); j++) { -+ if (reachable.find(j->second) == reachable.end()) -+ work_queue.push_back(j->second); -+ } -+ } -+ -+ /* walk the set of states and remove any that aren't reachable */ -+ if (reachable.size() < states.size()) { -+ int count = 0; -+ Partition::iterator i; -+ Partition::iterator next; -+ for (i = states.begin(); i != states.end(); i = next) { -+ next = i; -+ next++; -+ if (reachable.find(*i) == reachable.end()) { -+ if (flags & DFA_DUMP_UNREACHABLE) { -+ cerr << "unreachable: "<< **i; -+ if (*i == start) -+ cerr << " <=="; -+ if ((*i)->accept) { -+ cerr << " (0x" << hex << (*i)->accept -+ << " " << (*i)->audit << dec << ')'; -+ } -+ cerr << endl; -+ } -+ State *current = *i; -+ states.erase(i); -+ delete(current); -+ count++; -+ } -+ } -+ -+ if (count && (flags & DFA_DUMP_STATS)) -+ cerr << "DFA: states " << states.size() << " removed " -+ << count << " unreachable states\n"; -+ } -+} -+ -+/* test if two states have the same transitions under partition_map */ -+bool DFA::same_mappings(State *s1, State *s2) -+{ -+ if (s1->cases.otherwise && s1->cases.otherwise != nonmatching) { -+ if (!s2->cases.otherwise || s2->cases.otherwise == nonmatching) -+ return false; -+ Partition *p1 = s1->cases.otherwise->partition; -+ Partition *p2 = 
s2->cases.otherwise->partition; -+ if (p1 != p2) -+ return false; -+ } else if (s2->cases.otherwise && s2->cases.otherwise != nonmatching) { -+ return false; -+ } -+ -+ if (s1->cases.cases.size() != s2->cases.cases.size()) -+ return false; -+ for (Cases::iterator j1 = s1->cases.begin(); j1 != s1->cases.end(); -+ j1++){ -+ Cases::iterator j2 = s2->cases.cases.find(j1->first); -+ if (j2 == s2->cases.end()) -+ return false; -+ Partition *p1 = j1->second->partition; -+ Partition *p2 = j2->second->partition; -+ if (p1 != p2) -+ return false; -+ } -+ -+ return true; -+} -+ -+/* Do simple djb2 hashing against a States transition cases -+ * this provides a rough initial guess at state equivalence as if a state -+ * has a different number of transitions or has transitions on different -+ * cases they will never be equivalent. -+ * Note: this only hashes based off of the alphabet (not destination) -+ * as different destinations could end up being equiv -+ */ -+size_t DFA::hash_trans(State *s) -+{ -+ unsigned long hash = 5381; -+ -+ for (Cases::iterator j = s->cases.begin(); j != s->cases.end(); j++){ -+ hash = ((hash << 5) + hash) + j->first; -+ State *k = j->second; -+ hash = ((hash << 5) + hash) + k->cases.cases.size(); -+ } -+ -+ if (s->cases.otherwise && s->cases.otherwise != nonmatching) { -+ hash = ((hash << 5) + hash) + 5381; -+ State *k = s->cases.otherwise; -+ hash = ((hash << 5) + hash) + k->cases.cases.size(); -+ } -+ -+ hash = (hash << 8) | s->cases.cases.size(); -+ return hash; -+} -+ -+/* minimize the number of dfa states */ -+void DFA::minimize(dfaflags_t flags) -+{ -+ map , Partition *> perm_map; -+ list partitions; -+ -+ /* Set up the initial partitions -+ * minimium of - 1 non accepting, and 1 accepting -+ * if trans hashing is used the accepting and non-accepting partitions -+ * can be further split based on the number and type of transitions -+ * a state makes. -+ * If permission hashing is enabled the accepting partitions can -+ * be further divided by permissions. This can result in not -+ * obtaining a truely minimized dfa but comes close, and can speedup -+ * minimization. -+ */ -+ int accept_count = 0; -+ int final_accept = 0; -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ uint64_t perm_hash = 0; -+ if (flags & DFA_CONTROL_MINIMIZE_HASH_PERMS) { -+ /* make every unique perm create a new partition */ -+ perm_hash = ((uint64_t)(*i)->audit)<<32 | -+ (uint64_t)(*i)->accept; -+ } else if ((*i)->audit || (*i)->accept) { -+ /* combine all perms together into a single parition */ -+ perm_hash = 1; -+ } /* else not an accept state so 0 for perm_hash */ -+ -+ size_t trans_hash = 0; -+ if (flags & DFA_CONTROL_MINIMIZE_HASH_TRANS) -+ trans_hash = hash_trans(*i); -+ pair group = make_pair(perm_hash, trans_hash); -+ map , Partition *>::iterator p = perm_map.find(group); -+ if (p == perm_map.end()) { -+ Partition *part = new Partition(); -+ part->push_back(*i); -+ perm_map.insert(make_pair(group, part)); -+ partitions.push_back(part); -+ (*i)->partition = part; -+ if (perm_hash) -+ accept_count++; -+ } else { -+ (*i)->partition = p->second; -+ p->second->push_back(*i); -+ } -+ -+ if ((flags & DFA_DUMP_PROGRESS) && -+ (partitions.size() % 1000 == 0)) -+ cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << partitions.size() << " (accept " << accept_count << ")\r"; -+ } -+ -+ /* perm_map is no longer needed so free the memory it is using. -+ * Don't remove - doing it manually here helps reduce peak memory usage. 
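The loop above seeds the minimization partitions by a (permission hash, transition hash) key, so states that cannot possibly be equivalent never share a partition and the refinement loop that follows has less to split. A small stand-alone sketch of the same grouping, using an invented StateInfo stand-in rather than the real State class:

#include <iostream>
#include <list>
#include <map>
#include <utility>
#include <vector>

// Stand-in for State: just the fields the initial grouping looks at.
struct StateInfo {
    int label;
    unsigned int accept;         // accept permissions, 0 for non-accepting
    unsigned long trans_hash;    // hash of the outgoing transition shape
};

typedef std::list<StateInfo *> Partition;

int main()
{
    StateInfo s0 = {0, 0, 7}, s1 = {1, 0, 7}, s2 = {2, 4, 7}, s3 = {3, 0, 9};
    std::vector<StateInfo *> states = {&s0, &s1, &s2, &s3};

    std::map<std::pair<unsigned int, unsigned long>, Partition> groups;
    for (StateInfo *s : states)
        groups[std::make_pair(s->accept, s->trans_hash)].push_back(s);

    // s0 and s1 share a partition; s2 (different permissions) and s3
    // (different transition shape) each start alone, and only states that
    // share a partition ever need to be compared during refinement.
    std::cout << "initial partitions: " << groups.size() << '\n';   // 3
}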
-+ */ -+ perm_map.clear(); -+ -+ int init_count = partitions.size(); -+ if (flags & DFA_DUMP_PROGRESS) -+ cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\r"; -+ -+ /* Now do repartitioning until each partition contains the set of -+ * states that are the same. This will happen when the partition -+ * splitting stables. With a worse case of 1 state per partition -+ * ie. already minimized. -+ */ -+ Partition *new_part; -+ int new_part_count; -+ do { -+ new_part_count = 0; -+ for (list ::iterator p = partitions.begin(); -+ p != partitions.end(); p++) { -+ new_part = NULL; -+ State *rep = *((*p)->begin()); -+ Partition::iterator next; -+ for (Partition::iterator s = ++(*p)->begin(); -+ s != (*p)->end(); ) { -+ if (same_mappings(rep, *s)) { -+ ++s; -+ continue; -+ } -+ if (!new_part) { -+ new_part = new Partition; -+ list ::iterator tmp = p; -+ partitions.insert(++tmp, new_part); -+ new_part_count++; -+ } -+ new_part->push_back(*s); -+ s = (*p)->erase(s); -+ } -+ /* remapping partition_map for new_part entries -+ * Do not do this above as it messes up same_mappings -+ */ -+ if (new_part) { -+ for (Partition::iterator m = new_part->begin(); -+ m != new_part->end(); m++) { -+ (*m)->partition = new_part; -+ } -+ } -+ if ((flags & DFA_DUMP_PROGRESS) && -+ (partitions.size() % 100 == 0)) -+ cerr << "\033[2KMinimize dfa: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\r"; -+ } -+ } while(new_part_count); -+ -+ if (partitions.size() == states.size()) { -+ if (flags & DFA_DUMP_STATS) -+ cerr << "\033[2KDfa minimization no states removed: partitions " << partitions.size() << "\tinit " << init_count << " (accept " << accept_count << ")\n"; -+ -+ -+ goto out; -+ } -+ -+ /* Remap the dfa so it uses the representative states -+ * Use the first state of a partition as the representative state -+ * At this point all states with in a partion have transitions -+ * to states within the same partitions, however this can slow -+ * down compressed dfa compression as there are more states, -+ */ -+ for (list ::iterator p = partitions.begin(); -+ p != partitions.end(); p++) { -+ /* representative state for this partition */ -+ State *rep = *((*p)->begin()); -+ -+ /* update representative state's transitions */ -+ if (rep->cases.otherwise) { -+ Partition *partition = rep->cases.otherwise->partition; -+ rep->cases.otherwise = *partition->begin(); -+ } -+ for (Cases::iterator c = rep->cases.begin(); -+ c != rep->cases.end(); c++) { -+ Partition *partition = c->second->partition; -+ c->second = *partition->begin(); -+ } -+ -+//if ((*p)->size() > 1) -+//cerr << rep->label << ": "; -+ /* clear the state label for all non representative states, -+ * and accumulate permissions */ -+ for (Partition::iterator i = ++(*p)->begin(); i != (*p)->end(); i++) { -+//cerr << " " << (*i)->label; -+ (*i)->label = -1; -+ rep->accept |= (*i)->accept; -+ rep->audit |= (*i)->audit; -+ } -+ if (rep->accept || rep->audit) -+ final_accept++; -+//if ((*p)->size() > 1) -+//cerr << "\n"; -+ } -+ if (flags & DFA_DUMP_STATS) -+ cerr << "\033[2KMinimized dfa: final partitions " << partitions.size() << " (accept " << final_accept << ")" << "\tinit " << init_count << " (accept " << accept_count << ")\n"; -+ -+ -+ -+ /* make sure nonmatching and start state are up to date with the -+ * mappings */ -+ { -+ Partition *partition = nonmatching->partition; -+ if (*partition->begin() != nonmatching) { -+ nonmatching = 
*partition->begin(); -+ } -+ -+ partition = start->partition; -+ if (*partition->begin() != start) { -+ start = *partition->begin(); -+ } -+ } -+ -+ /* Now that the states have been remapped, remove all states -+ * that are not the representive states for their partition, they -+ * will have a label == -1 -+ */ -+ for (Partition::iterator i = states.begin(); i != states.end(); ) { -+ if ((*i)->label == -1) { -+ State *s = *i; -+ i = states.erase(i); -+ delete(s); -+ } else -+ i++; -+ } -+ -+out: -+ /* Cleanup */ -+ while (!partitions.empty()) { -+ Partition *p = partitions.front(); -+ partitions.pop_front(); -+ delete(p); -+ } -+} -+ -+/** -+ * text-dump the DFA (for debugging). -+ */ -+void DFA::dump(ostream& os) -+{ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ if (*i == start || (*i)->accept) { -+ os << **i; -+ if (*i == start) -+ os << " <=="; -+ if ((*i)->accept) { -+ os << " (0x" << hex << (*i)->accept << " " << (*i)->audit << dec << ')'; -+ } -+ os << endl; -+ } -+ } -+ os << endl; -+ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ if ((*i)->cases.otherwise) -+ os << **i << " -> " << (*i)->cases.otherwise << endl; -+ for (Cases::iterator j = (*i)->cases.begin(); j != (*i)->cases.end(); j++) { -+ os << **i << " -> " << j->second << ": " << j->first << endl; -+ } -+ } -+ os << endl; -+} -+ -+/** -+ * Create a dot (graphviz) graph from the DFA (for debugging). -+ */ -+void DFA::dump_dot_graph(ostream& os) -+{ -+ os << "digraph \"dfa\" {" << endl; -+ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ if (*i == nonmatching) -+ continue; -+ -+ os << "\t\"" << **i << "\" [" << endl; -+ if (*i == start) { -+ os << "\t\tstyle=bold" << endl; -+ } -+ uint32_t perms = (*i)->accept; -+ if (perms) { -+ os << "\t\tlabel=\"" << **i << "\\n(" -+ << perms << ")\"" << endl; -+ } -+ os << "\t]" << endl; -+ } -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ Cases& cases = (*i)->cases; -+ Chars excluded; -+ -+ for (Cases::iterator j = cases.begin(); j != cases.end(); j++) { -+ if (j->second == nonmatching) -+ excluded.insert(j->first); -+ else { -+ os << "\t\"" << **i << "\" -> \""; -+ os << j->second << "\" [" << endl; -+ os << "\t\tlabel=\"" << j->first << "\"" << endl; -+ os << "\t]" << endl; -+ } -+ } -+ if (cases.otherwise && cases.otherwise != nonmatching) { -+ os << "\t\"" << **i << "\" -> \"" << cases.otherwise -+ << "\" [" << endl; -+ if (!excluded.empty()) { -+ os << "\t\tlabel=\"[^"; -+ for (Chars::iterator i = excluded.begin(); -+ i != excluded.end(); -+ i++) { -+ os << *i; -+ } -+ os << "]\"" << endl; -+ } -+ os << "\t]" << endl; -+ } -+ } -+ os << '}' << endl; -+} -+ -+/** -+ * Compute character equivalence classes in the DFA to save space in the -+ * transition table. 
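Before the implementation, it may help to see what an equivalence class is worth: two input bytes can share a class whenever every state sends them to the same next state, so the compressed table needs one column per class rather than one per byte. A toy sketch with a hand-built table, not this code's own data structures:

#include <iostream>
#include <map>
#include <vector>

int main()
{
    // Toy DFA: state 1 treats every lowercase letter identically and '/'
    // specially; states 0 and 2 have no explicit transitions (default to 0).
    std::vector<std::map<unsigned char, int>> table(3);
    for (unsigned char c = 'a'; c <= 'z'; c++)
        table[1][c] = 2;
    table[1]['/'] = 1;

    // Group bytes by how they behave across all states; each distinct
    // behaviour column is one equivalence class.
    std::map<std::vector<int>, std::vector<int>> classes;
    for (int c = 0; c < 256; c++) {
        std::vector<int> column;
        for (size_t s = 0; s < table.size(); s++) {
            auto i = table[s].find((unsigned char)c);
            column.push_back(i == table[s].end() ? 0 : i->second);
        }
        classes[column].push_back(c);
    }
    std::cout << "equivalence classes: " << classes.size() << '\n';   // 3
}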
-+ */ -+map DFA::equivalence_classes(dfaflags_t flags) -+{ -+ map classes; -+ uchar next_class = 1; -+ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ Cases& cases = (*i)->cases; -+ -+ /* Group edges to the same next state together */ -+ map node_sets; -+ for (Cases::iterator j = cases.begin(); j != cases.end(); j++) -+ node_sets[j->second].insert(j->first); -+ -+ for (map::iterator j = node_sets.begin(); -+ j != node_sets.end(); -+ j++) { -+ /* Group edges to the same next state together by class */ -+ map node_classes; -+ bool class_used = false; -+ for (Chars::iterator k = j->second.begin(); -+ k != j->second.end(); -+ k++) { -+ pair::iterator, bool> x = -+ classes.insert(make_pair(*k, next_class)); -+ if (x.second) -+ class_used = true; -+ pair::iterator, bool> y = -+ node_classes.insert(make_pair(x.first->second, Chars())); -+ y.first->second.insert(*k); -+ } -+ if (class_used) { -+ next_class++; -+ class_used = false; -+ } -+ for (map::iterator k = node_classes.begin(); -+ k != node_classes.end(); -+ k++) { -+ /** -+ * If any other characters are in the same class, move -+ * the characters in this class into their own new class -+ */ -+ map::iterator l; -+ for (l = classes.begin(); l != classes.end(); l++) { -+ if (l->second == k->first && -+ k->second.find(l->first) == k->second.end()) { -+ class_used = true; -+ break; -+ } -+ } -+ if (class_used) { -+ for (Chars::iterator l = k->second.begin(); -+ l != k->second.end(); -+ l++) { -+ classes[*l] = next_class; -+ } -+ next_class++; -+ class_used = false; -+ } -+ } -+ } -+ } -+ -+ if (flags & DFA_DUMP_EQUIV_STATS) -+ fprintf(stderr, "Equiv class reduces to %d classes\n", next_class - 1); -+ return classes; -+} -+ -+/** -+ * Text-dump the equivalence classes (for debugging). -+ */ -+void dump_equivalence_classes(ostream& os, map& eq) -+{ -+ map rev; -+ -+ for (map::iterator i = eq.begin(); i != eq.end(); i++) { -+ Chars& chars = rev.insert(make_pair(i->second, -+ Chars())).first->second; -+ chars.insert(i->first); -+ } -+ os << "(eq):" << endl; -+ for (map::iterator i = rev.begin(); i != rev.end(); i++) { -+ os << (int)i->first << ':'; -+ Chars& chars = i->second; -+ for (Chars::iterator j = chars.begin(); j != chars.end(); j++) { -+ os << ' ' << *j; -+ } -+ os << endl; -+ } -+} -+ -+/** -+ * Replace characters with classes (which are also represented as -+ * characters) in the DFA transition table. -+ */ -+void DFA::apply_equivalence_classes(map& eq) -+{ -+ /** -+ * Note: We only transform the transition table; the nodes continue to -+ * contain the original characters. -+ */ -+ for (Partition::iterator i = states.begin(); i != states.end(); i++) { -+ map tmp; -+ tmp.swap((*i)->cases.cases); -+ for (Cases::iterator j = tmp.begin(); j != tmp.end(); j++) -+ (*i)->cases.cases.insert(make_pair(eq[j->first], j->second)); -+ } -+} -+ -+/** -+ * Flip the children of all cat nodes. This causes strings to be matched -+ * back-forth. 
-+ */ -+void flip_tree(Node *node) -+{ -+ for (depth_first_traversal i(node); i; i++) { -+ if (CatNode *cat = dynamic_cast(*i)) { -+ swap(cat->child[0], cat->child[1]); -+ } -+ } -+} -+ -+class TransitionTable { -+ typedef vector > DefaultBase; -+ typedef vector > NextCheck; -+public: -+ TransitionTable(DFA& dfa, map& eq, dfaflags_t flags); -+ void dump(ostream& os); -+ void flex_table(ostream& os, const char *name); -+ void init_free_list(vector > &free_list, size_t prev, size_t start); -+ bool fits_in(vector > &free_list, -+ size_t base, Cases& cases); -+ void insert_state(vector > &free_list, -+ State *state, DFA& dfa); -+ -+private: -+ vector accept; -+ vector accept2; -+ DefaultBase default_base; -+ NextCheck next_check; -+ map num; -+ map& eq; -+ uchar max_eq; -+ size_t first_free; -+}; -+ -+ -+void TransitionTable::init_free_list(vector > &free_list, -+ size_t prev, size_t start) { -+ for (size_t i = start; i < free_list.size(); i++) { -+ if (prev) -+ free_list[prev].second = i; -+ free_list[i].first = prev; -+ prev = i; -+ } -+ free_list[free_list.size() -1].second = 0; -+} -+ -+/** -+ * new Construct the transition table. -+ */ -+TransitionTable::TransitionTable(DFA& dfa, map& eq, -+ dfaflags_t flags) -+ : eq(eq) -+{ -+ -+ if (flags & DFA_DUMP_TRANS_PROGRESS) -+ fprintf(stderr, "Compressing trans table:\r"); -+ -+ -+ if (eq.empty()) -+ max_eq = 255; -+ else { -+ max_eq = 0; -+ for(map::iterator i = eq.begin(); i != eq.end(); i++) { -+ if (i->second > max_eq) -+ max_eq = i->second; -+ } -+ } -+ -+ /* Do initial setup adding up all the transitions and sorting by -+ * transition count. -+ */ -+ size_t optimal = 2; -+ multimap order; -+ vector > free_list; -+ -+ for (Partition::iterator i = dfa.states.begin(); i != dfa.states.end(); i++) { -+ if (*i == dfa.start || *i == dfa.nonmatching) -+ continue; -+ optimal += (*i)->cases.cases.size(); -+ if (flags & DFA_CONTROL_TRANS_HIGH) { -+ size_t range = 0; -+ if ((*i)->cases.cases.size()) -+ range = (*i)->cases.cases.rbegin()->first - (*i)->cases.begin()->first; -+ size_t ord = ((256 - (*i)->cases.cases.size()) << 8) | -+ (256 - range); -+ /* reverse sort by entry count, most entries first */ -+ order.insert(make_pair(ord, *i)); -+ } -+ } -+ -+ /* Insert the dummy nonmatching transition by hand */ -+ next_check.push_back(make_pair(dfa.nonmatching, dfa.nonmatching)); -+ default_base.push_back(make_pair(dfa.nonmatching, 0)); -+ num.insert(make_pair(dfa.nonmatching, num.size())); -+ -+ accept.resize(dfa.states.size()); -+ accept2.resize(dfa.states.size()); -+ next_check.resize(optimal); -+ free_list.resize(optimal); -+ -+ accept[0] = 0; -+ accept2[0] = 0; -+ first_free = 1; -+ init_free_list(free_list, 0, 1); -+ -+ insert_state(free_list, dfa.start, dfa); -+ accept[1] = 0; -+ accept2[1] = 0; -+ num.insert(make_pair(dfa.start, num.size())); -+ -+ int count = 2; -+ -+ if (!(flags & DFA_CONTROL_TRANS_HIGH)) { -+ for (Partition::iterator i = dfa.states.begin(); i != dfa.states.end(); -+ i++) { -+ if (*i != dfa.nonmatching && *i != dfa.start) { -+ insert_state(free_list, *i, dfa); -+ accept[num.size()] = (*i)->accept; -+ accept2[num.size()] = (*i)->audit; -+ num.insert(make_pair(*i, num.size())); -+ } -+ if (flags & (DFA_DUMP_TRANS_PROGRESS)) { -+ count++; -+ if (count % 100 == 0) -+ fprintf(stderr, "\033[2KCompressing trans table: insert state: %d/%ld\r", count, dfa.states.size()); -+ } -+ } -+ } else { -+ for (multimap ::iterator i = order.begin(); -+ i != order.end(); i++) { -+ if (i->second != dfa.nonmatching && i->second != dfa.start) { -+ 
insert_state(free_list, i->second, dfa); -+ accept[num.size()] = i->second->accept; -+ accept2[num.size()] = i->second->audit; -+ num.insert(make_pair(i->second, num.size())); -+ } -+ if (flags & (DFA_DUMP_TRANS_PROGRESS)) { -+ count++; -+ if (count % 100 == 0) -+ fprintf(stderr, "\033[2KCompressing trans table: insert state: %d/%ld\r", count, dfa.states.size()); -+ } -+ } -+ } -+ -+ if (flags & (DFA_DUMP_TRANS_STATS | DFA_DUMP_TRANS_PROGRESS)) { -+ ssize_t size = 4 * next_check.size() + 6 * dfa.states.size(); -+ fprintf(stderr, "\033[2KCompressed trans table: states %ld, next/check %ld, optimal next/check %ld avg/state %.2f, compression %ld/%ld = %.2f %%\n", dfa.states.size(), next_check.size(), optimal, (float)next_check.size()/(float)dfa.states.size(), size, 512 * dfa.states.size(), 100.0 - ((float) size * 100.0 / (float)(512 * dfa.states.size()))); -+ } -+} -+ -+ -+/** -+ * Does fit into position of the transition table? -+ */ -+bool TransitionTable::fits_in(vector > &free_list __attribute__((unused)), -+ size_t pos, Cases& cases) -+{ -+ size_t c, base = pos - cases.begin()->first; -+ for (Cases::iterator i = cases.begin(); i != cases.end(); i++) { -+ c = base + i->first; -+ /* if it overflows the next_check array it fits in as we will -+ * resize */ -+ if (c >= next_check.size()) -+ return true; -+ if (next_check[c].second) -+ return false; -+ } -+ -+ return true; -+} -+ -+/** -+ * Insert of into the transition table. -+ */ -+void TransitionTable::insert_state(vector > &free_list, -+ State *from, DFA& dfa) -+{ -+ State *default_state = dfa.nonmatching; -+ size_t base = 0; -+ int resize; -+ -+ Cases& cases = from->cases; -+ size_t c = cases.begin()->first; -+ size_t prev = 0; -+ size_t x = first_free; -+ -+ if (cases.otherwise) -+ default_state = cases.otherwise; -+ if (cases.cases.empty()) -+ goto do_insert; -+ -+repeat: -+ resize = 0; -+ /* get the first free entry that won't underflow */ -+ while (x && (x < c)) { -+ prev = x; -+ x = free_list[x].second; -+ } -+ -+ /* try inserting until we succeed. */ -+ while (x && !fits_in(free_list, x, cases)) { -+ prev = x; -+ x = free_list[x].second; -+ } -+ if (!x) { -+ resize = 256 - cases.begin()->first; -+ x = free_list.size(); -+ /* set prev to last free */ -+ } else if (x + 255 - cases.begin()->first >= next_check.size()) { -+ resize = (255 - cases.begin()->first - (next_check.size() - 1 - x)); -+ for (size_t y = x; y; y = free_list[y].second) -+ prev = y; -+ } -+ if (resize) { -+ /* expand next_check and free_list */ -+ size_t old_size = free_list.size(); -+ next_check.resize(next_check.size() + resize); -+ free_list.resize(free_list.size() + resize); -+ init_free_list(free_list, prev, old_size); -+ if (!first_free) -+ first_free = old_size;; -+ if (x == old_size) -+ goto repeat; -+ } -+ -+ base = x - c; -+ for (Cases::iterator j = cases.begin(); j != cases.end(); j++) { -+ next_check[base + j->first] = make_pair(j->second, from); -+ size_t prev = free_list[base + j->first].first; -+ size_t next = free_list[base + j->first].second; -+ if (prev) -+ free_list[prev].second = next; -+ if (next) -+ free_list[next].first = prev; -+ if (base + j->first == first_free) -+ first_free = next; -+ } -+ -+do_insert: -+ default_base.push_back(make_pair(default_state, base)); -+} -+ -+/** -+ * Text-dump the transition table (for debugging). 
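insert_state() above packs each state's transitions into the shared next/check comb using the state's base offset and a free list of unused slots. The matching side is the payoff; a minimal sketch of the lookup, with tiny hand-built tables rather than ones produced by this code:

#include <iostream>
#include <vector>

struct PackedDfa {
    std::vector<int> def, base;     // per-state default state and base offset
    std::vector<int> next, check;   // shared comb of transitions

    int step(int s, unsigned char c) const {
        size_t pos = (size_t)base[s] + c;
        if (pos < check.size() && check[pos] == s)
            return next[pos];       // explicit transition owned by s
        return def[s];              // otherwise take the default transition
    }
};

int main()
{
    // Two states; state 1 has one explicit transition on 'a' and otherwise
    // falls back to state 0 (the non-matching state).
    PackedDfa d;
    d.def  = {0, 0};
    d.base = {0, 3};                // state 1's entries start at offset 3
    d.next.assign(3 + 'a' + 1, 0);
    d.check.assign(3 + 'a' + 1, -1);
    d.next[3 + 'a'] = 1;            // 1 --'a'--> 1
    d.check[3 + 'a'] = 1;

    std::cout << d.step(1, 'a') << ' '    // 1 (explicit)
              << d.step(1, 'b') << '\n';  // 0 (default)
}

Storing the owning state in check is what lets many states interleave their entries in one array: a slot only counts for state s when check agrees, otherwise the state's default transition applies.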
-+ */ -+void TransitionTable::dump(ostream& os) -+{ -+ map st; -+ for (map::iterator i = num.begin(); -+ i != num.end(); -+ i++) { -+ st.insert(make_pair(i->second, i->first)); -+ } -+ -+ os << "size=" << default_base.size() << " (accept, default, base): {state} -> {default state}" << endl; -+ for (size_t i = 0; i < default_base.size(); i++) { -+ os << i << ": "; -+ os << "(" << accept[i] << ", " -+ << num[default_base[i].first] << ", " -+ << default_base[i].second << ")"; -+ if (st[i]) -+ os << " " << *st[i]; -+ if (default_base[i].first) -+ os << " -> " << *default_base[i].first; -+ os << endl; -+ } -+ -+ os << "size=" << next_check.size() << " (next, check): {check state} -> {next state} : offset from base" << endl; -+ for (size_t i = 0; i < next_check.size(); i++) { -+ if (!next_check[i].second) -+ continue; -+ -+ os << i << ": "; -+ if (next_check[i].second) { -+ os << "(" << num[next_check[i].first] << ", " -+ << num[next_check[i].second] << ")" << " " -+ << *next_check[i].second << " -> " -+ << *next_check[i].first << ": "; -+ -+ size_t offs = i - default_base[num[next_check[i].second]].second; -+ if (eq.size()) -+ os << offs; -+ else -+ os << (uchar)offs; -+ } -+ os << endl; -+ } -+} -+ -+#if 0 -+template -+class FirstIterator { -+public: -+ FirstIterator(Iter pos) : pos(pos) { } -+ typename Iter::value_type::first_type operator*() { return pos->first; } -+ bool operator!=(FirstIterator& i) { return pos != i.pos; } -+ void operator++() { ++pos; } -+ ssize_t operator-(FirstIterator i) { return pos - i.pos; } -+private: -+ Iter pos; -+}; -+ -+template -+FirstIterator first_iterator(Iter iter) -+{ -+ return FirstIterator(iter); -+} -+ -+template -+class SecondIterator { -+public: -+ SecondIterator(Iter pos) : pos(pos) { } -+ typename Iter::value_type::second_type operator*() { return pos->second; } -+ bool operator!=(SecondIterator& i) { return pos != i.pos; } -+ void operator++() { ++pos; } -+ ssize_t operator-(SecondIterator i) { return pos - i.pos; } -+private: -+ Iter pos; -+}; -+ -+template -+SecondIterator second_iterator(Iter iter) -+{ -+ return SecondIterator(iter); -+} -+#endif -+ -+/** -+ * Create a flex-style binary dump of the DFA tables. The table format -+ * was partly reverse engineered from the flex sources and from -+ * examining the tables that flex creates with its --tables-file option. -+ * (Only the -Cf and -Ce formats are currently supported.) -+ */ -+ -+#include "flex-tables.h" -+#include "regexp.h" -+ -+static inline size_t pad64(size_t i) -+{ -+ return (i + (size_t)7) & ~(size_t)7; -+} -+ -+string fill64(size_t i) -+{ -+ const char zeroes[8] = { }; -+ string fill(zeroes, (i & 7) ? 
8 - (i & 7) : 0); -+ return fill; -+} -+ -+template -+size_t flex_table_size(Iter pos, Iter end) -+{ -+ return pad64(sizeof(struct table_header) + sizeof(*pos) * (end - pos)); -+} -+ -+template -+void write_flex_table(ostream& os, int id, Iter pos, Iter end) -+{ -+ struct table_header td = { 0, 0, 0, 0 }; -+ size_t size = end - pos; -+ -+ td.td_id = htons(id); -+ td.td_flags = htons(sizeof(*pos)); -+ td.td_lolen = htonl(size); -+ os.write((char *)&td, sizeof(td)); -+ -+ for (; pos != end; ++pos) { -+ switch(sizeof(*pos)) { -+ case 4: -+ os.put((char)(*pos >> 24)); -+ os.put((char)(*pos >> 16)); -+ case 2: -+ os.put((char)(*pos >> 8)); -+ case 1: -+ os.put((char)*pos); -+ } -+ } -+ -+ os << fill64(sizeof(td) + sizeof(*pos) * size); -+} -+ -+void TransitionTable::flex_table(ostream& os, const char *name) -+{ -+ const char th_version[] = "notflex"; -+ struct table_set_header th = { 0, 0, 0, 0 }; -+ -+ /** -+ * Change the following two data types to adjust the maximum flex -+ * table size. -+ */ -+ typedef uint16_t state_t; -+ typedef uint32_t trans_t; -+ -+ if (default_base.size() >= (state_t)-1) { -+ cerr << "Too many states (" << default_base.size() << ") for " -+ "type state_t" << endl; -+ exit(1); -+ } -+ if (next_check.size() >= (trans_t)-1) { -+ cerr << "Too many transitions (" << next_check.size() << ") for " -+ "type trans_t" << endl; -+ exit(1); -+ } -+ -+ /** -+ * Create copies of the data structures so that we can dump the tables -+ * using the generic write_flex_table() routine. -+ */ -+ vector equiv_vec; -+ if (eq.size()) { -+ equiv_vec.resize(256); -+ for (map::iterator i = eq.begin(); i != eq.end(); i++) { -+ equiv_vec[i->first] = i->second; -+ } -+ } -+ -+ vector default_vec; -+ vector base_vec; -+ for (DefaultBase::iterator i = default_base.begin(); -+ i != default_base.end(); -+ i++) { -+ default_vec.push_back(num[i->first]); -+ base_vec.push_back(i->second); -+ } -+ -+ vector next_vec; -+ vector check_vec; -+ for (NextCheck::iterator i = next_check.begin(); -+ i != next_check.end(); -+ i++) { -+ next_vec.push_back(num[i->first]); -+ check_vec.push_back(num[i->second]); -+ } -+ -+ /* Write the actual flex parser table. */ -+ -+ size_t hsize = pad64(sizeof(th) + sizeof(th_version) + strlen(name) + 1); -+ th.th_magic = htonl(YYTH_REGEXP_MAGIC); -+ th.th_hsize = htonl(hsize); -+ th.th_ssize = htonl(hsize + -+ flex_table_size(accept.begin(), accept.end()) + -+ flex_table_size(accept2.begin(), accept2.end()) + -+ (eq.size() ? 
-+ flex_table_size(equiv_vec.begin(), equiv_vec.end()) : 0) + -+ flex_table_size(base_vec.begin(), base_vec.end()) + -+ flex_table_size(default_vec.begin(), default_vec.end()) + -+ flex_table_size(next_vec.begin(), next_vec.end()) + -+ flex_table_size(check_vec.begin(), check_vec.end())); -+ os.write((char *)&th, sizeof(th)); -+ os << th_version << (char)0 << name << (char)0; -+ os << fill64(sizeof(th) + sizeof(th_version) + strlen(name) + 1); -+ -+ -+ write_flex_table(os, YYTD_ID_ACCEPT, accept.begin(), accept.end()); -+ write_flex_table(os, YYTD_ID_ACCEPT2, accept2.begin(), accept2.end()); -+ if (eq.size()) -+ write_flex_table(os, YYTD_ID_EC, equiv_vec.begin(), equiv_vec.end()); -+ write_flex_table(os, YYTD_ID_BASE, base_vec.begin(), base_vec.end()); -+ write_flex_table(os, YYTD_ID_DEF, default_vec.begin(), default_vec.end()); -+ write_flex_table(os, YYTD_ID_NXT, next_vec.begin(), next_vec.end()); -+ write_flex_table(os, YYTD_ID_CHK, check_vec.begin(), check_vec.end()); -+} -+ -+#if 0 -+typedef set AcceptNodes; -+map dominance(DFA& dfa) -+{ -+ map is_dominated; -+ -+ for (States::iterator i = dfa.states.begin(); i != dfa.states.end(); i++) { -+ AcceptNodes set1; -+ for (State::iterator j = (*i)->begin(); j != (*i)->end(); j++) { -+ if (AcceptNode *accept = dynamic_cast(*j)) -+ set1.insert(accept); -+ } -+ for (AcceptNodes::iterator j = set1.begin(); j != set1.end(); j++) { -+ pair::iterator, bool> x = -+ is_dominated.insert(make_pair(*j, set1)); -+ if (!x.second) { -+ AcceptNodes &set2(x.first->second), set3; -+ for (AcceptNodes::iterator l = set2.begin(); -+ l != set2.end(); -+ l++) { -+ if (set1.find(*l) != set1.end()) -+ set3.insert(*l); -+ } -+ set3.swap(set2); -+ } -+ } -+ } -+ return is_dominated; -+} -+#endif -+ -+void dump_regexp_rec(ostream& os, Node *tree) -+{ -+ if (tree->child[0]) -+ dump_regexp_rec(os, tree->child[0]); -+ os << *tree; -+ if (tree->child[1]) -+ dump_regexp_rec(os, tree->child[1]); -+} -+ -+void dump_regexp(ostream& os, Node *tree) -+{ -+ dump_regexp_rec(os, tree); -+ os << endl; -+} -+ -+#include -+#include -+ -+struct aare_ruleset { -+ int reverse; -+ Node *root; -+}; -+ -+extern "C" aare_ruleset_t *aare_new_ruleset(int reverse) -+{ -+ aare_ruleset_t *container = (aare_ruleset_t *) malloc(sizeof(aare_ruleset_t)); -+ if (!container) -+ return NULL; -+ -+ container->root = NULL; -+ container->reverse = reverse; -+ -+ return container; -+} -+ -+extern "C" void aare_delete_ruleset(aare_ruleset_t *rules) -+{ -+ if (rules) { -+ if (rules->root) -+ rules->root->release(); -+ free(rules); -+ } -+} -+ -+static inline int diff_qualifiers(uint32_t perm1, uint32_t perm2) -+{ -+ return ((perm1 & AA_EXEC_TYPE) && (perm2 & AA_EXEC_TYPE) && -+ (perm1 & AA_EXEC_TYPE) != (perm2 & AA_EXEC_TYPE)); -+} -+ -+/** -+ * Compute the permission flags that this state corresponds to. If we -+ * have any exact matches, then they override the execute and safe -+ * execute flags. 
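Most of accept_perms() below deals with the exec-modifier special cases; stripped down to its two simplest steps, the folding is an OR over the allow bits contributed by each accept node followed by masking with the accumulated deny bits. A sketch with made-up permission bits (the real code uses the AA_* masks):

#include <cstdint>
#include <iostream>

int main()
{
    const uint32_t READ = 1u << 0, LOCK = 1u << 2;   // invented example bits

    uint32_t allow = 0, deny = 0;

    // Three accept nodes reached by one DFA state: two allow rules, one deny.
    allow |= READ;
    allow |= READ | LOCK;
    deny  |= LOCK;

    uint32_t perms = allow & ~deny;    // deny always wins over allow
    std::cout << std::hex << "allow 0x" << allow << " deny 0x" << deny
              << " -> perms 0x" << perms << '\n';   // allow 0x5 deny 0x4 -> perms 0x1
}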
-+ */ -+uint32_t accept_perms(NodeSet *state, uint32_t *audit_ctl, int *error) -+{ -+ uint32_t perms = 0, exact_match_perms = 0, audit = 0, exact_audit = 0, -+ quiet = 0, deny = 0; -+ -+ if (error) -+ *error = 0; -+ for (NodeSet::iterator i = state->begin(); i != state->end(); i++) { -+ MatchFlag *match; -+ if (!(match= dynamic_cast(*i))) -+ continue; -+ if (dynamic_cast(match)) { -+ /* exact match only ever happens with x */ -+ if (!is_merged_x_consistent(exact_match_perms, -+ match->flag) && error) -+ *error = 1;; -+ exact_match_perms |= match->flag; -+ exact_audit |= match->audit; -+ } else if (dynamic_cast(match)) { -+ deny |= match->flag; -+ quiet |= match->audit; -+ } else { -+ if (!is_merged_x_consistent(perms, match->flag) && error) -+ *error = 1; -+ perms |= match->flag; -+ audit |= match->audit; -+ } -+ } -+ -+//if (audit || quiet) -+//fprintf(stderr, "perms: 0x%x, audit: 0x%x exact: 0x%x eaud: 0x%x deny: 0x%x quiet: 0x%x\n", perms, audit, exact_match_perms, exact_audit, deny, quiet); -+ -+ perms |= exact_match_perms & -+ ~(AA_USER_EXEC_TYPE | AA_OTHER_EXEC_TYPE); -+ -+ if (exact_match_perms & AA_USER_EXEC_TYPE) { -+ perms = (exact_match_perms & AA_USER_EXEC_TYPE) | -+ (perms & ~AA_USER_EXEC_TYPE); -+ audit = (exact_audit & AA_USER_EXEC_TYPE) | -+ (audit & ~ AA_USER_EXEC_TYPE); -+ } -+ if (exact_match_perms & AA_OTHER_EXEC_TYPE) { -+ perms = (exact_match_perms & AA_OTHER_EXEC_TYPE) | -+ (perms & ~AA_OTHER_EXEC_TYPE); -+ audit = (exact_audit & AA_OTHER_EXEC_TYPE) | -+ (audit & ~AA_OTHER_EXEC_TYPE); -+ } -+ if (perms & AA_USER_EXEC & deny) -+ perms &= ~AA_USER_EXEC_TYPE; -+ -+ if (perms & AA_OTHER_EXEC & deny) -+ perms &= ~AA_OTHER_EXEC_TYPE; -+ -+ perms &= ~deny; -+ -+ if (audit_ctl) -+ *audit_ctl = PACK_AUDIT_CTL(audit, quiet & deny); -+ -+// if (perms & AA_ERROR_BIT) { -+// fprintf(stderr, "error bit 0x%x\n", perms); -+// exit(255); -+//} -+ -+ //if (perms & AA_EXEC_BITS) -+ //fprintf(stderr, "accept perm: 0x%x\n", perms); -+ /* -+ if (perms & ~AA_VALID_PERMS) -+ yyerror(_("Internal error accumulated invalid perm 0x%llx\n"), perms); -+ */ -+ -+//if (perms & AA_CHANGE_HAT) -+// fprintf(stderr, "change_hat 0x%x\n", perms); -+ -+ if (*error) -+ fprintf(stderr, "profile has merged rule with conflicting x modifiers\n"); -+ -+ return perms; -+} -+ -+extern "C" int aare_add_rule(aare_ruleset_t *rules, char *rule, int deny, -+ uint32_t perms, uint32_t audit, dfaflags_t flags) -+{ -+ return aare_add_rule_vec(rules, deny, perms, audit, 1, &rule, flags); -+} -+ -+#define FLAGS_WIDTH 2 -+#define MATCH_FLAGS_SIZE (sizeof(uint32_t) * 8 - 1) -+MatchFlag *match_flags[FLAGS_WIDTH][MATCH_FLAGS_SIZE]; -+DenyMatchFlag *deny_flags[FLAGS_WIDTH][MATCH_FLAGS_SIZE]; -+#define EXEC_MATCH_FLAGS_SIZE (AA_EXEC_COUNT *2 * 2 * 2) /* double for each of ix pux, unsafe x bits * u::o */ -+MatchFlag *exec_match_flags[FLAGS_WIDTH][EXEC_MATCH_FLAGS_SIZE]; /* mods + unsafe + ix + pux * u::o*/ -+ExactMatchFlag *exact_match_flags[FLAGS_WIDTH][EXEC_MATCH_FLAGS_SIZE];/* mods + unsafe + ix + pux *u::o*/ -+ -+extern "C" void aare_reset_matchflags(void) -+{ -+ uint32_t i, j; -+#define RESET_FLAGS(group, size) { \ -+ for (i = 0; i < FLAGS_WIDTH; i++) { \ -+ for (j = 0; j < size; j++) { \ -+ if ((group)[i][j]) delete (group)[i][j]; \ -+ (group)[i][j] = NULL; \ -+ } \ -+ } \ -+} -+ RESET_FLAGS(match_flags,MATCH_FLAGS_SIZE); -+ RESET_FLAGS(deny_flags,MATCH_FLAGS_SIZE); -+ RESET_FLAGS(exec_match_flags,EXEC_MATCH_FLAGS_SIZE); -+ RESET_FLAGS(exact_match_flags,EXEC_MATCH_FLAGS_SIZE); -+#undef RESET_FLAGS -+} -+ -+extern "C" int 
aare_add_rule_vec(aare_ruleset_t *rules, int deny, -+ uint32_t perms, uint32_t audit, -+ int count, char **rulev, -+ dfaflags_t flags) -+{ -+ Node *tree = NULL, *accept; -+ int exact_match; -+ -+ assert(perms != 0); -+ -+ if (regexp_parse(&tree, rulev[0])) -+ return 0; -+ for (int i = 1; i < count; i++) { -+ Node *subtree = NULL; -+ Node *node = new CharNode(0); -+ if (!node) -+ return 0; -+ tree = new CatNode(tree, node); -+ if (regexp_parse(&subtree, rulev[i])) -+ return 0; -+ tree = new CatNode(tree, subtree); -+ } -+ -+ /* -+ * Check if we have an expression with or without wildcards. This -+ * determines how exec modifiers are merged in accept_perms() based -+ * on how we split permission bitmasks here. -+ */ -+ exact_match = 1; -+ for (depth_first_traversal i(tree); i; i++) { -+ if (dynamic_cast(*i) || -+ dynamic_cast(*i) || -+ dynamic_cast(*i) || -+ dynamic_cast(*i) || -+ dynamic_cast(*i)) -+ exact_match = 0; -+ } -+ -+ if (rules->reverse) -+ flip_tree(tree); -+ -+ -+/* 0x7f == 4 bits x mods + 1 bit unsafe mask + 1 bit ix, + 1 pux after shift */ -+#define EXTRACT_X_INDEX(perm, shift) (((perm) >> (shift + 7)) & 0x7f) -+ -+//if (perms & ALL_AA_EXEC_TYPE && (!perms & AA_EXEC_BITS)) -+// fprintf(stderr, "adding X rule without MAY_EXEC: 0x%x %s\n", perms, rulev[0]); -+ -+//if (perms & ALL_EXEC_TYPE) -+// fprintf(stderr, "adding X rule %s 0x%x\n", rulev[0], perms); -+ -+//if (audit) -+//fprintf(stderr, "adding rule with audit bits set: 0x%x %s\n", audit, rulev[0]); -+ -+//if (perms & AA_CHANGE_HAT) -+// fprintf(stderr, "adding change_hat rule %s\n", rulev[0]); -+ -+/* the permissions set is assumed to be non-empty if any audit -+ * bits are specified */ -+ accept = NULL; -+ for (unsigned int n = 0; perms && n < (sizeof(perms) * 8) ; n++) { -+ uint32_t mask = 1 << n; -+ -+ if (perms & mask) { -+ int ai = audit & mask ? 
1 : 0; -+ perms &= ~mask; -+ -+ Node *flag; -+ if (mask & ALL_AA_EXEC_TYPE) -+ /* these cases are covered by EXEC_BITS */ -+ continue; -+ if (deny) { -+ if (deny_flags[ai][n]) { -+ flag = deny_flags[ai][n]; -+ } else { -+//fprintf(stderr, "Adding deny ai %d mask 0x%x audit 0x%x\n", ai, mask, audit & mask); -+ deny_flags[ai][n] = new DenyMatchFlag(mask, audit&mask); -+ flag = deny_flags[ai][n]; -+ } -+ } else if (mask & AA_EXEC_BITS) { -+ uint32_t eperm = 0; -+ uint32_t index = 0; -+ if (mask & AA_USER_EXEC) { -+ eperm = mask | (perms & AA_USER_EXEC_TYPE); -+ index = EXTRACT_X_INDEX(eperm, AA_USER_SHIFT); -+ } else { -+ eperm = mask | (perms & AA_OTHER_EXEC_TYPE); -+ index = EXTRACT_X_INDEX(eperm, AA_OTHER_SHIFT) + (AA_EXEC_COUNT << 2); -+ } -+//fprintf(stderr, "index %d eperm 0x%x\n", index, eperm); -+ if (exact_match) { -+ if (exact_match_flags[ai][index]) { -+ flag = exact_match_flags[ai][index]; -+ } else { -+ exact_match_flags[ai][index] = new ExactMatchFlag(eperm, audit&mask); -+ flag = exact_match_flags[ai][index]; -+ } -+ } else { -+ if (exec_match_flags[ai][index]) { -+ flag = exec_match_flags[ai][index]; -+ } else { -+ exec_match_flags[ai][index] = new MatchFlag(eperm, audit&mask); -+ flag = exec_match_flags[ai][index]; -+ } -+ } -+ } else { -+ if (match_flags[ai][n]) { -+ flag = match_flags[ai][n]; -+ } else { -+ match_flags[ai][n] = new MatchFlag(mask, audit&mask); -+ flag = match_flags[ai][n]; -+ } -+ } -+ if (accept) -+ accept = new AltNode(accept, flag); -+ else -+ accept = flag; -+ } -+ } -+ -+ if (flags & DFA_DUMP_RULE_EXPR) { -+ cerr << "rule: "; -+ cerr << rulev[0]; -+ for (int i = 1; i < count; i++) { -+ cerr << "\\x00"; -+ cerr << rulev[i]; -+ } -+ cerr << " -> "; -+ tree->dump(cerr); -+ cerr << "\n\n"; -+ } -+ -+ if (rules->root) -+ rules->root = new AltNode(rules->root, new CatNode(tree, accept)); -+ else -+ rules->root = new CatNode(tree, accept); -+ -+ return 1; -+ -+} -+ -+/* create a dfa from the ruleset -+ * returns: buffer contain dfa tables, @size set to the size of the tables -+ * else NULL on failure -+ */ -+extern "C" void *aare_create_dfa(aare_ruleset_t *rules, size_t *size, dfaflags_t flags) -+{ -+ char *buffer = NULL; -+ -+ label_nodes(rules->root); -+ if (flags & DFA_DUMP_TREE) { -+ cerr << "\nDFA: Expression Tree\n"; -+ rules->root->dump(cerr); -+ cerr << "\n\n"; -+ } -+ -+ if (flags & DFA_CONTROL_TREE_SIMPLE) { -+ rules->root = simplify_tree(rules->root, flags); -+ -+ if (flags & DFA_DUMP_SIMPLE_TREE) { -+ cerr << "\nDFA: Simplified Expression Tree\n"; -+ rules->root->dump(cerr); -+ cerr << "\n\n"; -+ } -+ } -+ -+ stringstream stream; -+ try { -+ DFA dfa(rules->root, flags); -+ if (flags & DFA_DUMP_UNIQ_PERMS) -+ dfa.dump_uniq_perms("dfa"); -+ -+ if (flags & DFA_CONTROL_MINIMIZE) { -+ dfa.minimize(flags); -+ -+ if (flags & DFA_DUMP_MIN_UNIQ_PERMS) -+ dfa.dump_uniq_perms("minimized dfa"); -+ } -+ if (flags & DFA_CONTROL_REMOVE_UNREACHABLE) -+ dfa.remove_unreachable(flags); -+ -+ if (flags & DFA_DUMP_STATES) -+ dfa.dump(cerr); -+ -+ if (flags & DFA_DUMP_GRAPH) -+ dfa.dump_dot_graph(cerr); -+ -+ map eq; -+ if (flags & DFA_CONTROL_EQUIV) { -+ eq = dfa.equivalence_classes(flags); -+ dfa.apply_equivalence_classes(eq); -+ -+ if (flags & DFA_DUMP_EQUIV) { -+ cerr << "\nDFA equivalence class\n"; -+ dump_equivalence_classes(cerr, eq); -+ } -+ } else if (flags & DFA_DUMP_EQUIV) -+ cerr << "\nDFA did not generate an equivalence class\n"; -+ -+ TransitionTable transition_table(dfa, eq, flags); -+ if (flags & DFA_DUMP_TRANS_TABLE) -+ transition_table.dump(cerr); -+ 
transition_table.flex_table(stream, ""); -+ } catch (int error) { -+ *size = 0; -+ return NULL; -+ } -+ -+ stringbuf *buf = stream.rdbuf(); -+ -+ buf->pubseekpos(0); -+ *size = buf->in_avail(); -+ -+ buffer = (char *)malloc(*size); -+ if (!buffer) -+ return NULL; -+ buf->sgetn(buffer, *size); -+ return buffer; -+} ---- a/parser/parser_alias.c -+++ b/parser/parser_alias.c -@@ -15,6 +15,7 @@ - * along with this program; if not, contact Novell, Inc. - */ - -+#define _GNU_SOURCE 1 - #include - #include - #include ---- a/parser/parser_main.c -+++ b/parser/parser_main.c -@@ -41,7 +41,6 @@ - #include - - #include "parser.h" --#include "parser_version.h" - #include "parser_include.h" - #include "libapparmor_re/apparmor_re.h" - -@@ -138,7 +137,7 @@ static int debug = 0; - - static void display_version(void) - { -- printf("%s version " PARSER_VERSION "\n%s\n", parser_title, -+ printf("%s version " PACKAGE_VERSION "\n%s\n", parser_title, - parser_copyright); - } - ---- a/parser/parser_policy.c -+++ b/parser/parser_policy.c -@@ -19,6 +19,7 @@ - * Ltd. - */ - -+#define _GNU_SOURCE 1 - #include - #include - #include ---- a/parser/parser_regex.c -+++ b/parser/parser_regex.c -@@ -22,6 +22,8 @@ - #include - #define _(s) gettext(s) - -+#include -+ - /* #define DEBUG */ - - #include "parser.h" ---- a/parser/parser_symtab.c -+++ b/parser/parser_symtab.c -@@ -15,6 +15,7 @@ - * along with this program; if not, contact Novell, Inc. - */ - -+#define _GNU_SOURCE 1 - #include - #include - #include ---- a/parser/po/Makefile -+++ b/parser/po/Makefile -@@ -12,9 +12,11 @@ all: - - DISABLED_LANGS= - --include ../common/Make-po.rules --../common/Make-po.rules: -- make -C .. common/Make.rules -+NAME="apparmor-parser" -+ -+include ../../common/Make-po.rules -+../../common/Make-po.rules: -+ make -C ../.. 
common/Make.rules - - XGETTEXT_ARGS+=--language=C --keyword=_ $(shell if [ -f ${NAME}.pot ] ; then echo -n -j ; fi) - ---- /dev/null -+++ b/po/Makefile.am -@@ -0,0 +1,2 @@ -+ -+ ---- a/profiles/Makefile -+++ b/profiles/Makefile -@@ -20,7 +20,7 @@ - # Makefile for LSM-based AppArmor profiles - - NAME=apparmor-profiles --ALL: local -+all: local - COMMONDIR=../common/ - - include common/Make.rules ---- /dev/null -+++ b/tests/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = regression ---- /dev/null -+++ b/tests/regression/Makefile.am -@@ -0,0 +1 @@ -+SUBDIRS = subdomain ---- /dev/null -+++ b/tests/regression/subdomain/Makefile.am -@@ -0,0 +1,109 @@ -+TESTS = access \ -+ capabilities \ -+ changeprofile \ -+ changehat \ -+ changehat_pthread \ -+ changehat_fork \ -+ changehat_misc \ -+ chdir \ -+ clone \ -+ deleted \ -+ environ \ -+ exec \ -+ exec_qual \ -+ fchdir \ -+ fork \ -+ i18n \ -+ link \ -+ link_subset \ -+ mkdir \ -+ mmap \ -+ mount \ -+ mult_mount \ -+ named_pipe \ -+ net_raw \ -+ open \ -+ openat \ -+ pipe \ -+ ptrace \ -+ pwrite \ -+ regex \ -+ rename \ -+ readdir \ -+ rw \ -+ swap \ -+ sd_flags \ -+ setattr \ -+ symlink \ -+ syscall \ -+ unix_fd_server \ -+ unlink\ -+ xattrs\ -+ longpath -+ -+check_PROGRAMS = $(TESTS) -+ -+ -+SOURCES = \ -+ access.c \ -+ changeprofile.c \ -+ changehat.c \ -+ changehat_fork.c \ -+ changehat_misc.c \ -+ changehat_misc2.c \ -+ changehat_twice.c \ -+ changehat_fail.c \ -+ changehat_wrapper.c \ -+ changehat_pthread.c \ -+ chdir.c \ -+ chgrp.c \ -+ chmod.c \ -+ chown.c \ -+ clone.c \ -+ deleted.c \ -+ environ.c \ -+ env_check.c \ -+ exec.c \ -+ exec_qual.c \ -+ exec_qual2.c \ -+ fchdir.c \ -+ fchgrp.c \ -+ fchmod.c \ -+ fchown.c \ -+ fork.c \ -+ link.c \ -+ link_subset.c \ -+ mmap.c \ -+ mkdir.c \ -+ mount.c \ -+ named_pipe.c \ -+ net_raw.c \ -+ open.c \ -+ openat.c \ -+ pipe.c \ -+ ptrace.c \ -+ ptrace_helper.c \ -+ pwrite.c \ -+ rename.c \ -+ readdir.c \ -+ rw.c \ -+ symlink.c \ -+ syscall_mknod.c \ -+ swap.c \ -+ syscall_chroot.c \ -+ syscall_mlockall.c \ -+ syscall_ptrace.c \ -+ syscall_reboot.c \ -+ syscall_setpriority.c \ -+ syscall_sethostname.c \ -+ syscall_setdomainname.c \ -+ syscall_setscheduler.c \ -+ syscall_sysctl.c \ -+ sysctl_proc.c \ -+ tcp.c \ -+ unix_fd_client.c \ -+ unix_fd_server.c \ -+ unlink.c \ -+ xattrs.c -+ -+changehat_pthread_LDFLAGS = -pthread ---- /dev/null -+++ b/utils/Immunix/Makefile.am -@@ -0,0 +1,3 @@ -+perlmoddir =$(VENDOR_PERL)/Immunix -+ -+perlmod_DATA = AppArmor.pm Repository.pm Config.pm Reports.pm Severity.pm SubDomain.pm ---- /dev/null -+++ b/utils/Makefile.PL -@@ -0,0 +1,15 @@ -+#!/usr/bin/perl -w -+ -+use ExtUtils::MakeMaker; -+ -+use vars qw($CFLAGS $OBJECT $VERSION $OPTIMIZE); -+ -+WriteMakefile( -+ 'NAME' => 'AppArmor', -+ 'MAKEFILE' => 'Makefile.perl', -+ 'FIRST_MAKEFILE' => 'Makefile.perl', -+ 'ABSTRACT' => q[AppArmor utility interface], -+ 'EXE_FILES'=> ['genprof', 'logprof', 'autodep', 'audit', -+ 'complain', 'enforce', 'unconfined', 'aa-eventd', -+ 'apparmor_status', 'apparmor_notify'], -+); ---- /dev/null -+++ b/utils/Makefile.am -@@ -0,0 +1,36 @@ -+dist_man_MANS = aa-autodep.8 aa-complain.8 aa-enforce.8 aa-logprof.8 \ -+ aa-genprof.8 aa-unconfined.8 aa-audit.8 aa-status.8 \ -+ aa-decode.8 aa-notify.8 logprof.conf.5 -+noinst_DATA = $(addsuffix .html,$(dist_man_MANS)) -+ -+sbin_SCRIPTS = aa-genprof aa-logprof aa-autodep aa-audit aa-complain \ -+ aa-enforce aa-unconfined aa-eventd aa-status aa-decode \ -+ aa-notify -+ -+etc_apparmor_DATA = logprof.conf notify.conf severity.db -+ -+install-data-local: -+ 
$(mkinstalldirs) $(DESTDIR)/var/log/apparmor -+ -+CLEANFILES = $(dist_man_MANS) Makefile.perl blib $(dist_man_MANS) -+ -+PODARGS = --center=AppArmor --release=NOVELL/SUSE -+ -+pod2man = pod2man $(PODARGS) --section $(subst .,,$(suffix $<)) $< > $@ -+ -+.pod.5: -+ $(pod2man) -+.pod.7: -+ $(pod2man) -+.pod.8: -+ $(pod2man) -+ -+pod2html = pod2html --header --css ../common/apparmor.css --infile=$< --outfile=$@ -+ -+%.5.html : %.pod -+ $(pod2html) -+%.7.html : %.pod -+ $(pod2html) -+%.8.html : %.pod -+ $(pod2html) -+SUBDIRS = po Immunix ---- a/utils/po/Makefile -+++ b/utils/po/Makefile -@@ -18,10 +18,12 @@ all: - # As translations get added, they will automatically be included, unless - # the lang is explicitly added to DISABLED_LANGS; e.g. DISABLED_LANGS=en es - -+NAME="apparmor-utils" -+ - DISABLED_LANGS= - --include ../common/Make-po.rules --../common/Make-po.rules: -- make -C .. common/Make.rules -+include ../../common/Make-po.rules -+../../common/Make-po.rules: -+ make -C ../.. common/Make.rules - - XGETTEXT_ARGS+=--language=perl diff --git a/apparmor-2.7.103.tar.gz b/apparmor-2.7.103.tar.gz new file mode 100644 index 0000000..08cdcbb --- /dev/null +++ b/apparmor-2.7.103.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8157ed9aed68db66318e424aa95644b28294876df2672d6c450600b84bafe315 +size 1452366 diff --git a/apparmor-2.7.2.tar.gz b/apparmor-2.7.2.tar.gz deleted file mode 100644 index 6c0a722..0000000 --- a/apparmor-2.7.2.tar.gz +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:42deb8cbf4937fac07a48ec8427b90131e92ed2f83b606beee092bdb4fc2a41f -size 1403151 diff --git a/apparmor-r2022-log-parser-network-bnc755923.patch b/apparmor-r2022-log-parser-network-bnc755923.patch deleted file mode 100644 index 100e8a3..0000000 --- a/apparmor-r2022-log-parser-network-bnc755923.patch +++ /dev/null @@ -1,308 +0,0 @@ ------------------------------------------------------------- -revno: 2022 -fixes bug: https://launchpad.net/bugs/800826 -committer: Steve Beattie -branch nick: apparmor -timestamp: Fri 2012-04-06 15:59:04 -0700 -message: - libapparmor: add support for ip addresses and ports - - Bugs: https://bugs.launchpad.net/ubuntu/+source/apparmor/+bug/800826 - https://bugzilla.novell.com/show_bug.cgi?id=755923 - - This patch modifies the libapparmor log parsing code to add support - for the additional ip address and port keywords that can occur in - network rejection rules. The laddr and faddr keywords stand for local - address and foreign address respectively. - - The regex used to match an ip address is not very strict, to hopefully - catch the formats that the kernel emits for ipv6 addresses; however, - because this is in a context triggered by the addr keywords, it should - not over-eagerly consume non-ip addresses. Said addresses are returned - as strings in the struct to be processed by the calling application. 
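(Illustrative aside, not part of the removed patch file: a log-parsing client of libapparmor would consume the new fields roughly as sketched below. The sketch assumes the parse_record()/free_record() entry points and the aalogparse.h header name as shipped by libapparmor; the net_* field names are taken from the aalogparse.h hunk quoted underneath. Build with -lapparmor.)

#include <stdio.h>
#include <aalogparse.h>     /* libapparmor log parsing API; header name assumed */

int main(void)
{
        /* a DENIED sendmsg event, modelled on testcase_network_01.in below */
        char msg[] = "type=AVC msg=audit(1308766940.698:3704): apparmor=\"DENIED\" "
                     "operation=\"sendmsg\" parent=24737 profile=\"/usr/bin/evince-thumbnailer\" "
                     "pid=24743 comm=\"evince-thumbnai\" laddr=192.168.66.150 lport=765 "
                     "faddr=192.168.66.200 fport=2049 family=\"inet\" sock_type=\"stream\" protocol=6";

        aa_log_record *record = parse_record(msg);
        if (!record || record->event == AA_RECORD_INVALID)
                return 1;

        /* the fields added by this patch: addresses are plain strings,
         * the ports stay 0 when the keyword is absent from the log line */
        if (record->net_local_addr)
                printf("local   %s:%lu\n", record->net_local_addr, record->net_local_port);
        if (record->net_foreign_addr)
                printf("foreign %s:%lu\n", record->net_foreign_addr, record->net_foreign_port);

        free_record(record);
        return 0;
}
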
- - -=== modified file 'libraries/libapparmor/src/aalogparse.h' ---- libraries/libapparmor/src/aalogparse.h 2011-02-23 22:02:45 +0000 -+++ libraries/libapparmor/src/aalogparse.h 2012-04-06 22:59:04 +0000 -@@ -141,6 +141,10 @@ - char *net_family; - char *net_protocol; - char *net_sock_type; -+ char *net_local_addr; -+ unsigned long net_local_port; -+ char *net_foreign_addr; -+ unsigned long net_foreign_port; - } aa_log_record; - - /** - -=== modified file 'libraries/libapparmor/src/grammar.y' ---- libraries/libapparmor/src/grammar.y 2011-11-30 19:07:48 +0000 -+++ libraries/libapparmor/src/grammar.y 2012-04-06 22:59:04 +0000 -@@ -83,6 +83,7 @@ - %token TOK_QUOTED_STRING TOK_ID TOK_MODE TOK_DMESG_STAMP - %token TOK_AUDIT_DIGITS TOK_DATE_MONTH TOK_DATE_TIME - %token TOK_HEXSTRING TOK_TYPE_OTHER TOK_MSG_REST -+%token TOK_IP_ADDR - - %token TOK_EQUALS - %token TOK_COLON -@@ -133,6 +134,10 @@ - %token TOK_KEY_CAPNAME - %token TOK_KEY_OFFSET - %token TOK_KEY_TARGET -+%token TOK_KEY_LADDR -+%token TOK_KEY_FADDR -+%token TOK_KEY_LPORT -+%token TOK_KEY_FPORT - - %token TOK_SYSLOG_KERNEL - -@@ -268,6 +273,14 @@ - { /* target was always name2 in the past */ - ret_record->name2 = $3; - } -+ | TOK_KEY_LADDR TOK_EQUALS TOK_IP_ADDR -+ { ret_record->net_local_addr = $3;} -+ | TOK_KEY_FADDR TOK_EQUALS TOK_IP_ADDR -+ { ret_record->net_foreign_addr = $3;} -+ | TOK_KEY_LPORT TOK_EQUALS TOK_DIGITS -+ { ret_record->net_local_port = $3;} -+ | TOK_KEY_FPORT TOK_EQUALS TOK_DIGITS -+ { ret_record->net_foreign_port = $3;} - | TOK_MSG_REST - { - ret_record->event = AA_RECORD_INVALID; - -=== modified file 'libraries/libapparmor/src/scanner.l' ---- libraries/libapparmor/src/scanner.l 2011-11-30 19:07:48 +0000 -+++ libraries/libapparmor/src/scanner.l 2012-04-06 22:59:04 +0000 -@@ -133,8 +133,15 @@ - key_capname "capname" - key_offset "offset" - key_target "target" -+key_laddr "laddr" -+key_faddr "faddr" -+key_lport "lport" -+key_fport "fport" - audit "audit" - -+/* network addrs */ -+ip_addr [a-f[:digit:].:]{3,} -+ - /* syslog tokens */ - syslog_kernel kernel{colon} - syslog_month Jan(uary)?|Feb(ruary)?|Mar(ch)?|Apr(il)?|May|Jun(e)?|Jul(y)?|Aug(ust)?|Sep(tember)?|Oct(ober)?|Nov(ember)?|Dec(ember)? -@@ -149,6 +156,7 @@ - %x dmesg_timestamp - %x safe_string - %x audit_types -+%x ip_addr - %x other_audit - %x unknown_message - -@@ -201,6 +209,12 @@ - . { /* eek, error! try another state */ BEGIN(INITIAL); yyless(0); } - } - -+{ -+ {ip_addr} { yylval->t_str = strdup(yytext); yy_pop_state(yyscanner); return(TOK_IP_ADDR); } -+ {equals} { return(TOK_EQUALS); } -+ . { /* eek, error! 
try another state */ BEGIN(INITIAL); yyless(0); } -+ } -+ - { - {equals} { return(TOK_EQUALS); } - {digits} { yylval->t_long = atol(yytext); BEGIN(INITIAL); return(TOK_DIGITS); } -@@ -270,6 +284,10 @@ - {key_capname} { return(TOK_KEY_CAPNAME); } - {key_offset} { return(TOK_KEY_OFFSET); } - {key_target} { return(TOK_KEY_TARGET); } -+{key_laddr} { yy_push_state(ip_addr, yyscanner); return(TOK_KEY_LADDR); } -+{key_faddr} { yy_push_state(ip_addr, yyscanner); return(TOK_KEY_FADDR); } -+{key_lport} { return(TOK_KEY_LPORT); } -+{key_fport} { return(TOK_KEY_FPORT); } - - {syslog_kernel} { BEGIN(dmesg_timestamp); return(TOK_SYSLOG_KERNEL); } - {syslog_month} { yylval->t_str = strdup(yytext); return(TOK_DATE_MONTH); } - -=== modified file 'libraries/libapparmor/testsuite/test_multi.c' ---- libraries/libapparmor/testsuite/test_multi.c 2010-07-26 16:20:02 +0000 -+++ libraries/libapparmor/testsuite/test_multi.c 2012-04-06 22:59:04 +0000 -@@ -51,6 +51,18 @@ - return ret; - } - -+#define print_string(description, var) \ -+ if ((var) != NULL) { \ -+ printf("%s: %s\n", (description), (var)); \ -+ } -+ -+/* unset is the value that the library sets to the var to indicate -+ that it is unset */ -+#define print_long(description, var, unset) \ -+ if ((var) != (unsigned long) (unset)) { \ -+ printf("%s: %ld\n", (description), (var)); \ -+ } -+ - int print_results(aa_log_record *record) - { - printf("Event type: "); -@@ -185,6 +197,11 @@ - { - printf("Protocol: %s\n", record->net_protocol); - } -+ print_string("Local addr", record->net_local_addr); -+ print_string("Foreign addr", record->net_foreign_addr); -+ print_long("Local port", record->net_local_port, 0); -+ print_long("Foreign port", record->net_foreign_port, 0); -+ - printf("Epoch: %lu\n", record->epoch); - printf("Audit subid: %u\n", record->audit_sub_id); - return(0); - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_01.err' -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_01.in' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_01.in 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_01.in 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,1 @@ -+Apr 5 19:30:56 precise-amd64 kernel: [153073.826757] type=1400 audit(1308766940.698:3704): apparmor="DENIED" operation="sendmsg" parent=24737 profile="/usr/bin/evince-thumbnailer" pid=24743 comm="evince-thumbnai" laddr=192.168.66.150 lport=765 faddr=192.168.66.200 fport=2049 family="inet" sock_type="stream" protocol=6 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_01.out' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_01.out 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_01.out 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,18 @@ -+START -+File: test_multi/testcase_network_01.in -+Event type: AA_RECORD_DENIED -+Audit ID: 1308766940.698:3704 -+Operation: sendmsg -+Profile: /usr/bin/evince-thumbnailer -+Command: evince-thumbnai -+Parent: 24737 -+PID: 24743 -+Network family: inet -+Socket type: stream -+Protocol: tcp -+Local addr: 192.168.66.150 -+Foreign addr: 192.168.66.200 -+Local port: 765 -+Foreign port: 2049 -+Epoch: 1308766940 -+Audit subid: 3704 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_02.err' -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_02.in' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_02.in 1970-01-01 00:00:00 +0000 -+++ 
libraries/libapparmor/testsuite/test_multi/testcase_network_02.in 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,1 @@ -+Apr 5 19:31:04 precise-amd64 kernel: [153073.826757] type=1400 audit(1308766940.698:3704): apparmor="DENIED" operation="sendmsg" parent=24737 profile="/usr/bin/evince-thumbnailer" pid=24743 comm="evince-thumbnai" lport=765 fport=2049 family="inet" sock_type="stream" protocol=6 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_02.out' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_02.out 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_02.out 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,16 @@ -+START -+File: test_multi/testcase_network_02.in -+Event type: AA_RECORD_DENIED -+Audit ID: 1308766940.698:3704 -+Operation: sendmsg -+Profile: /usr/bin/evince-thumbnailer -+Command: evince-thumbnai -+Parent: 24737 -+PID: 24743 -+Network family: inet -+Socket type: stream -+Protocol: tcp -+Local port: 765 -+Foreign port: 2049 -+Epoch: 1308766940 -+Audit subid: 3704 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_03.err' -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_03.in' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_03.in 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_03.in 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,1 @@ -+type=AVC msg=audit(1333648169.009:11707146): apparmor="ALLOWED" operation="accept" parent=25932 profile="/usr/lib/dovecot/imap-login" pid=5049 comm="imap-login" lport=143 family="inet6" sock_type="stream" protocol=6 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_03.out' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_03.out 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_03.out 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,15 @@ -+START -+File: test_multi/testcase_network_03.in -+Event type: AA_RECORD_ALLOWED -+Audit ID: 1333648169.009:11707146 -+Operation: accept -+Profile: /usr/lib/dovecot/imap-login -+Command: imap-login -+Parent: 25932 -+PID: 5049 -+Network family: inet6 -+Socket type: stream -+Protocol: tcp -+Local port: 143 -+Epoch: 1333648169 -+Audit subid: 11707146 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_04.err' -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_04.in' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_04.in 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_04.in 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,1 @@ -+type=AVC msg=audit(1333697181.284:273901): apparmor="DENIED" operation="recvmsg" parent=1596 profile="/home/ubuntu/tmp/nc" pid=1056 comm="nc" laddr=::1 lport=2048 faddr=::1 fport=33986 family="inet6" sock_type="stream" protocol=6 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_04.out' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_04.out 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_04.out 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,18 @@ -+START -+File: test_multi/testcase_network_04.in -+Event type: AA_RECORD_DENIED -+Audit ID: 1333697181.284:273901 -+Operation: recvmsg -+Profile: /home/ubuntu/tmp/nc -+Command: nc -+Parent: 1596 -+PID: 1056 -+Network family: inet6 -+Socket type: stream -+Protocol: tcp -+Local addr: ::1 -+Foreign addr: ::1 -+Local port: 2048 
-+Foreign port: 33986 -+Epoch: 1333697181 -+Audit subid: 273901 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_05.err' -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_05.in' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_05.in 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_05.in 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,1 @@ -+type=AVC msg=audit(1333698107.128:273917): apparmor="DENIED" operation="recvmsg" parent=1596 profile="/home/ubuntu/tmp/nc" pid=1875 comm="nc" laddr=::ffff:127.0.0.1 lport=2048 faddr=::ffff:127.0.0.1 fport=59180 family="inet6" sock_type="stream" protocol=6 - -=== added file 'libraries/libapparmor/testsuite/test_multi/testcase_network_05.out' ---- libraries/libapparmor/testsuite/test_multi/testcase_network_05.out 1970-01-01 00:00:00 +0000 -+++ libraries/libapparmor/testsuite/test_multi/testcase_network_05.out 2012-04-06 22:59:04 +0000 -@@ -0,0 +1,18 @@ -+START -+File: test_multi/testcase_network_05.in -+Event type: AA_RECORD_DENIED -+Audit ID: 1333698107.128:273917 -+Operation: recvmsg -+Profile: /home/ubuntu/tmp/nc -+Command: nc -+Parent: 1596 -+PID: 1875 -+Network family: inet6 -+Socket type: stream -+Protocol: tcp -+Local addr: ::ffff:127.0.0.1 -+Foreign addr: ::ffff:127.0.0.1 -+Local port: 2048 -+Foreign port: 59180 -+Epoch: 1333698107 -+Audit subid: 273917 diff --git a/apparmor-remove-repo b/apparmor-remove-repo deleted file mode 100644 index 458936d..0000000 --- a/apparmor-remove-repo +++ /dev/null @@ -1,56 +0,0 @@ -From: Jeff Mahoney -Subject: apparmor-utils: Allow repository to be completely disabled - - This patch allows the repository to be completely disabled. It's been - subject to massive bitrot and isn't really maintained. - - It will only confuse the user if they are asked for repository information - and it doesn't work. - -Signed-off-by: Jeff Mahoney ---- - utils/Immunix/AppArmor.pm | 5 +++++ - utils/logprof.conf | 4 ++++ - 2 files changed, 9 insertions(+) - ---- a/utils/Immunix/AppArmor.pm -+++ b/utils/Immunix/AppArmor.pm -@@ -3153,6 +3153,8 @@ sub UI_repo_signup() { - sub UI_ask_to_enable_repo() { - - my $q = { }; -+ return if (defined $cfg->{settings}{allow_repository} && -+ $cfg->{settings}{allow_repository} eq "no"); - return if ( not defined $cfg->{repository}{url} ); - $q->{headers} = [ - gettext("Repository"), $cfg->{repository}{url}, -@@ -3277,6 +3279,8 @@ sub get_preferred_user ($) { - - sub repo_is_enabled () { - my $enabled; -+ return 0 if defined($cfg->{settings}{allow_repository}) && -+ $cfg->{settings}{allow_repository} eq "no"; - if ($cfg->{repository}{url} && - $repo_cfg && - $repo_cfg->{repository}{enabled} && -@@ -3290,6 +3294,7 @@ sub repo_is_enabled () { - sub update_repo_profile($) { - my $profile = shift; - -+ return undef if not repo_is_enabled(); - return undef if ( not is_repo_profile($profile) ); - my $distro = $cfg->{repository}{distro}; - my $url = $profile->{repo}{url}; ---- a/utils/logprof.conf -+++ b/utils/logprof.conf -@@ -34,6 +34,10 @@ - # files. - custom_includes = - -+ # whether to prompt to enable repositories (values: yes/no) -+ # This feature has fallen to bitrot and should not be used. 
-+ allow_repository = no -+ - - [repository] - distro = ubuntu-intrepid diff --git a/apparmor-techdoc.patch b/apparmor-techdoc.patch new file mode 100644 index 0000000..10bfda8 --- /dev/null +++ b/apparmor-techdoc.patch @@ -0,0 +1,80 @@ +Various changes in building techdoc.tex: +- make table of contents, footnotes etc. clickable hyperlinks +- use timestamp of techdoc.tex (instead of build time) as creationdate + in the PDF metadata +- don't include build date on first page of the PDF +- make clean: + - delete techdoc.out (created by pdftex) + - fix deletion of techdoc.txt (was techdo_r_.txt) + +The initial target was to get reproduceable PDF builds (therefore the +timestamp-related changes), the other things came up during discussing +this patch with David Haller. + +The only remaining difference in the PDF from build to build is the /ID +line. This line can't be controlled in pdflatex and is now filtered +out by build-compare in the openSUSE build service (bnc#760867). + +Credits go to David Haller for writing large parts of this patch +(but he didn't notice the techdo_r_.txt ;-) + + +Signed-Off-By: Christian Boltz + + + +=== modified file 'parser/Makefile' +--- parser/Makefile 2012-03-22 20:19:27 +0000 ++++ parser/Makefile 2012-05-08 18:40:10 +0000 +@@ -118,7 +118,8 @@ + $(MAKE) -C po ${NAME}.pot NAME=${NAME} SOURCES="${SRCS} ${HDRS}" + + techdoc.pdf: techdoc.tex +- while pdflatex $< ${BUILD_OUTPUT} || exit 1 ; \ ++ timestamp=$(shell date "+%Y%m%d%H%M%S+02'00'" -r $< );\ ++ while pdflatex "\def\fixedpdfdate{$$timestamp}\input $<" ${BUILD_OUTPUT} || exit 1 ; \ + grep -q "Label(s) may have changed" techdoc.log; \ + do :; done + +@@ -302,7 +303,7 @@ + rm -f $(NAME)*.tar.gz $(NAME)*.tgz + rm -f af_names.h + rm -f cap_names.h +- rm -rf techdoc.aux techdoc.log techdoc.pdf techdoc.toc techdor.txt techdoc/ ++ rm -rf techdoc.aux techdoc.out techdoc.log techdoc.pdf techdoc.toc techdoc.txt techdoc/ + $(MAKE) -s -C $(AAREDIR) clean + $(MAKE) -s -C po clean + $(MAKE) -s -C tst clean + +=== modified file 'parser/techdoc.tex' +--- parser/techdoc.tex 2011-02-09 22:29:05 +0000 ++++ parser/techdoc.tex 2012-05-08 18:55:56 +0000 +@@ -5,6 +5,17 @@ + \usepackage{url} + %\usepackage{times} + ++\usepackage[pdftex, ++ pdfauthor={Andreas Gruenbacher and Seth Arnold}, ++ pdftitle={AppArmor Technical Documentation},% ++\ifx\fixedpdfdate\@empty\else ++ pdfcreationdate={\fixedpdfdate}, ++ pdfmoddate={\fixedpdfdate}, ++\fi ++ pdfsubject={AppArmor}, ++ pdfkeywords={AppArmor} ++]{hyperref} ++ + \hyphenation{App-Armor} + \hyphenation{name-space} + +@@ -14,7 +25,8 @@ + \author{Andreas Gruenbacher and Seth Arnold \\ + \url{{agruen,seth.arnold}@suse.de} \\ + SUSE Labs / Novell} +-%\date{} ++% don't include the (build!) 
date ++\date{} + + \begin{document} + + diff --git a/apparmor-utils-subdomain-compat b/apparmor-utils-subdomain-compat index 285798d..42ac1c1 100644 --- a/apparmor-utils-subdomain-compat +++ b/apparmor-utils-subdomain-compat @@ -28,7 +28,7 @@ Signed-off-by: Christian Boltz --- a/utils/Makefile 2011-05-27 21:08:50.000000000 +0200 +++ b/utils/Makefile 2011-09-10 17:57:55.000000000 +0200 @@ -31,7 +31,7 @@ PERLTOOLS = aa-genprof aa-logprof aa-aut - aa-unconfined aa-notify aa-disable + aa-unconfined aa-notify aa-disable aa-exec TOOLS = ${PERLTOOLS} aa-decode aa-status MODULES = ${MODDIR}/AppArmor.pm ${MODDIR}/Repository.pm \ - ${MODDIR}/Config.pm ${MODDIR}/Severity.pm diff --git a/apparmor.changes b/apparmor.changes index 9efeadf..72bb089 100644 --- a/apparmor.changes +++ b/apparmor.changes @@ -1,3 +1,22 @@ +------------------------------------------------------------------- +Tue May 8 19:30:23 UTC 2012 - opensuse@cboltz.de + +- add apparmor-techdoc.patch to remove traces of the build time in PDF files + +------------------------------------------------------------------- +Sat May 5 20:25:49 UTC 2012 - opensuse@cboltz.de + +- update to AppArmor 2.8 beta5 (= 2.7.103 / r2031) + - new utility aa-exec to confine a program with the specified AppArmor profile + - add support for mount rules + - see http://wiki.apparmor.net/index.php/ReleaseNotes_2_8 for full upstream + changelog +- removed upstreamed and backported patches +- remove outdated autobuild and "disable repo" patches that were disabled since + the AppArmor 2.7 package +- create the Immunix::SubDomain compat perl module only for openSUSE <= 12.1 + (bnc#720617 #c7) + ------------------------------------------------------------------- Mon Apr 16 21:16:41 UTC 2012 - opensuse@cboltz.de diff --git a/apparmor.spec b/apparmor.spec index 9dcc395..9f5b227 100644 --- a/apparmor.spec +++ b/apparmor.spec @@ -43,9 +43,8 @@ Name: apparmor %if ! %{?distro:1}0 %define distro suse %endif -Version: 2.7.2 +Version: 2.7.103 Release: 0 -%define versiondir 2.7.2 Summary: AppArmor userlevel parser utility License: GPL-2.0+ Group: Productivity/Networking/Security @@ -57,9 +56,6 @@ Source3: update-trans.sh # profile for winbindd (bnc#748499, not upstreamed yet) Source4: usr.sbin.winbindd -# add "/sys/devices/system/cpu/online r" to abstractions/base. Will be included in upstream > 2.7.2 -Patch: 0001-fix-for-lp929531.patch - # enable caching of profiles (= massive performance speedup when loading profiles) Patch1: apparmor-enable-profile-cache.diff @@ -69,25 +65,17 @@ Patch2: apparmor-samba-include-permissions-for-shares.diff # split a long string in AppArmor.pm. Not accepted upstream because they want a solution without hardcoded width. Patch5: apparmor-utils-string-split -# use autobuild everywhere. Patch applies to 2.6.1 only and probably won't be accepted upstream. -Patch10: apparmor-2.5.1-unified-build -# requires Patch10 -Patch11: apparmor-2.5.1-rpmlint-asprintf - # Add support for eDirectory calls in abstractions/nameservice. 
Not accepted upstream (yet) because of open questions Patch12: apparmor-2.5.1-edirectory-profile -# obsolete, upstream implemented this in another way -Patch15: apparmor-remove-repo - -# remove after 12.1 release - bnc#720617 #c7 +# create Immunix::SubDomain perl module - only included for openSUSE <= 12.1 - bnc#720617 #c7 Patch21: apparmor-utils-subdomain-compat -# bnc#738905 - commited upstream (after 2.7.2) +# bnc#738905 - commited upstream (after 2.7.2 / 2.8 beta5) Patch22: apparmor-dnsmasq-profile-fix.patch -# bnc#755923 / lp#800826 - logprof etc. ignores network log entries because of changed log format. from upstream r2022 (2.8 beta5 will have it) -Patch23: apparmor-r2022-log-parser-network-bnc755923.patch +# use hyperref and use techdoc.tex's file date for the pdf's creation and mod date (avoids useless rebuilds) - patch sent upstream after 2.8 beta5 +Patch30: apparmor-techdoc.patch Url: https://launchpad.net/apparmor PreReq: sed @@ -104,6 +92,7 @@ BuildRequires: latex2html BuildRequires: libtool BuildRequires: pcre-devel BuildRequires: pkg-config +BuildRequires: python BuildRequires: texlive-latex BuildRequires: w3m @@ -412,18 +401,25 @@ SubDomain. %endif %prep -%setup -q -n %{name}-%{versiondir} -%patch -p1 +%setup -q %patch1 -p1 %patch2 -p0 %patch5 -p1 -#%patch10 -p1 # disabled, see above -#%patch11 -p1 # disabled, see above %patch12 -p1 -#%patch15 -p1 # obsolete, see above + +# only create Immunix::SubDomain perl module for openSUSE <= 12.1 +%if 0%{?suse_version} +%if 0%{?suse_version} <= 1210 %patch21 -p1 +%endif +%endif %patch22 -p1 -%patch23 -p0 + +# preserve timestamp of techdoc.tex +touch -r parser/techdoc.tex parser/techdoc.timestamp +%patch30 -p0 +# preserve timestamp of techdoc.tex +touch -r parser/techdoc.timestamp parser/techdoc.tex # profile for winbindd (bnc#748499, not upstreamed yet) test ! -e profiles/apparmor.d/usr.sbin.winbindd @@ -571,6 +567,9 @@ echo ------------------------------------------------------------------- %doc parser/*.[1-9].html %doc common/apparmor.css %doc parser/techdoc.pdf parser/techdoc/techdoc.html parser/techdoc/techdoc.css parser/techdoc.txt +# apparmor.vim is included in the vim package. Ideally it should be in a -devel package, but that's overmuch for one file +%dir /usr/share/apparmor +/usr/share/apparmor/apparmor.vim %files parser %defattr(-,root,root) @@ -656,6 +655,7 @@ fi %doc %{_mandir}/man8/complain.8.gz %doc %{_mandir}/man8/disable.8.gz %doc %{_mandir}/man8/enforce.8.gz +%doc %{_mandir}/man8/exec.8.gz %doc %{_mandir}/man8/genprof.8.gz %doc %{_mandir}/man8/logprof.8.gz %doc %{_mandir}/man8/unconfined.8.gz
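
(Illustrative aside on the aa-exec utility mentioned in the changelog above: the shipped tool is one of the Perl utilities, see the PERLTOOLS hunk, but the core idea is to ask the kernel to switch to the given profile at the next exec and then exec the target program. A minimal C sketch of that idea, assuming the aa_change_onexec() call from libapparmor's sys/apparmor.h, could look like the following; it is not the actual aa-exec implementation. Build with -lapparmor.)

#include <stdio.h>
#include <unistd.h>
#include <sys/apparmor.h>       /* aa_change_onexec(), provided by libapparmor */

/* usage: confine <profile> <program> [args...]   (hypothetical tool name) */
int main(int argc, char *argv[])
{
        if (argc < 3) {
                fprintf(stderr, "usage: %s <profile> <program> [args...]\n", argv[0]);
                return 2;
        }

        /* request a transition to the given profile at the next exec */
        if (aa_change_onexec(argv[1]) == -1) {
                perror("aa_change_onexec");
                return 1;
        }

        execvp(argv[2], &argv[2]);
        perror("execvp");       /* reached only if the exec itself failed */
        return 1;
}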