forked from pool/s390-tools
Marcus Meissner
9b729e2acc
New package per "Factory first" policy. Please list me as bug owner and maintainer, if possible. OBS-URL: https://build.opensuse.org/request/show/459343 OBS-URL: https://build.opensuse.org/package/show/Base:System/s390-tools?expand=0&rev=1
Subject: [PATCH] [FEAT RTL1601] dump2tar: Add sysfs collection helper for dbginfo.sh

From: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>

Summary:     dump2tar: Add sysfs collection helper for dbginfo.sh
Description: Certain files in virtual filesystems such as sysfs,
             debugfs and procfs do not correctly report their size.
             As a result, tools like tar, cp or rsync cannot be easily
             used to collect these files.

             This patch adds a tool to efficiently dump such files
             into a compressed tar archive. It is intended to be used
             by the dbginfo.sh script to significantly speed up data
             collection.
Upstream-ID: -
Problem-ID:  RTL1601

Changelog:

- v2: Fix compiler warning and missing DESTDIR logic in Makefiles

Signed-off-by: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
---
 Makefile                    |    2
 README                      |    5
 dump2tar/Makefile           |   12
 dump2tar/include/buffer.h   |   47 +
 dump2tar/include/dref.h     |   26
 dump2tar/include/dump.h     |   60 +
 dump2tar/include/global.h   |   20
 dump2tar/include/idcache.h  |   23
 dump2tar/include/misc.h     |   98 ++
 dump2tar/include/strarray.h |   23
 dump2tar/include/tar.h      |   41
 dump2tar/man/Makefile       |   12
 dump2tar/man/dump2tar.1     |  454 ++++++++++
 dump2tar/src/Makefile       |   36
 dump2tar/src/buffer.c       |  271 ++++++
 dump2tar/src/dref.c         |   92 ++
 dump2tar/src/dump.c         | 1850 ++++++++++++++++++++++++++++++++++++++++++++
 dump2tar/src/dump2tar.c     |  474 +++++++++++
 dump2tar/src/global.c       |   17
 dump2tar/src/idcache.c      |  153 +++
 dump2tar/src/misc.c         |  492 +++++++++++
 dump2tar/src/strarray.c     |   81 +
 dump2tar/src/tar.c          |  270 ++++++
 23 files changed, 4558 insertions(+), 1 deletion(-)

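In virtual filesystems the size reported by stat() is often 0 or a page-sized placeholder, so the only reliable way to capture such a file is to read until end-of-file and size the archive entry afterwards. The stand-alone sketch below is not taken from the patch (slurp_fd and its parameters are invented names); it only illustrates that read-until-EOF idea in its simplest form:

#include <stdlib.h>
#include <unistd.h>

/*
 * Minimal sketch of the read-until-EOF idea: ignore the size reported
 * by stat() and grow the buffer until read() returns 0.  Returns the
 * number of bytes read or -1 on error; the caller frees *out.
 */
static ssize_t slurp_fd(int fd, char **out, size_t chunk)
{
	char *buf = NULL;
	size_t size = 0, used = 0;
	ssize_t n;

	for (;;) {
		if (used + chunk > size) {
			char *tmp = realloc(buf, used + chunk);

			if (!tmp) {
				free(buf);
				return -1;
			}
			buf = tmp;
			size = used + chunk;
		}
		n = read(fd, buf + used, chunk);
		if (n < 0) {
			free(buf);
			return -1;
		}
		if (n == 0)
			break;		/* EOF - now the real size is known */
		used += n;
	}
	*out = buf;
	return (ssize_t) used;
}

dump2tar applies the same read-until-EOF loop per file, but with chunked buffers that can spill into a temporary file and with optional per-file size and time limits (see buffer.c and dump.c below).
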
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,7 @@ SUB_DIRS = $(LIB_DIRS) zipl zdump fdasd
 	tape390 osasnmpd qetharp ip_watcher qethconf scripts zconf \
 	vmconvert vmcp man mon_tools dasdinfo vmur cpuplugd ipl_tools \
 	ziomon iucvterm hyptop cmsfs-fuse qethqoat zfcpdump zdsfs cpumf \
-	systemd hmcdrvfs cpacfstats zdev
+	systemd hmcdrvfs cpacfstats zdev dump2tar
 
 all: subdirs_make

--- a/README
+++ b/README
@@ -210,6 +210,11 @@ s390-tools (1.34.0)
   configuration of devices and device drivers which are specific to the s390
   platform.
 
+ * dump2tar:
+   dump2tar is a tool for creating a tar archive from the contents of
+   arbitrary files. It works even when the size of the actual file content
+   is not known beforehand (e.g. FIFOs, sysfs files).
+
 For more information refer to the following publications:
  * "Device Drivers, Features, and Commands" chapter "Useful Linux commands"
  * "Using the dump tools"

--- /dev/null
+++ b/dump2tar/Makefile
@@ -0,0 +1,12 @@
+# Common definitions
+include ../common.mak
+
+all:
+	$(MAKE) -C src
+
+install: all
+	$(MAKE) -C src install
+	$(MAKE) -C man install
+
+clean:
+	$(MAKE) -C src clean

--- /dev/null
+++ b/dump2tar/include/buffer.h
@@ -0,0 +1,47 @@
+/*
+ * dump2tar - tool to dump files and command output into a tar archive
+ *
+ * Data buffering functions
+ *
+ * Copyright IBM Corp. 2016
+ */
+
+#ifndef BUFFER_H
+#define BUFFER_H
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+/* Buffers for building tar file entries */
+struct buffer {
+	size_t total;	/* Total number of bytes in buffer */
+	size_t off;	/* Current offset to next free byte in memory buffer */
+	size_t size;	/* Memory buffer size */
+	char *addr;	/* Memory buffer address */
+	bool fd_open;	/* Has fd been openend yet? */
+	FILE *file;	/* FILE * of file containing previous buffer data */
+	int fd;		/* Handle of file containing previous buffer data */
+};
+
+void buffer_init(struct buffer *buffer, size_t size);
+struct buffer *buffer_alloc(size_t size);
+void buffer_reset(struct buffer *buffer);
+void buffer_close(struct buffer *buffer);
+void buffer_free(struct buffer *buffer, bool dyn);
+int buffer_open(struct buffer *buffer);
+int buffer_flush(struct buffer *buffer);
+ssize_t buffer_make_room(struct buffer *buffer, size_t size, bool usefile,
+			 size_t max_buffer_size);
+int buffer_truncate(struct buffer *buffer, size_t len);
+
+ssize_t buffer_read_fd(struct buffer *buffer, int fd, size_t chunk,
+		       bool usefile, size_t max_buffer_size);
+int buffer_add_data(struct buffer *buffer, char *addr, size_t len,
+		    bool usefile, size_t max_buffer_size);
+
+typedef int (*buffer_cb_t)(void *data, void *addr, size_t len);
+int buffer_iterate(struct buffer *buffer, buffer_cb_t cb, void *data);
+void buffer_print(struct buffer *buffer);
+
+#endif /* BUFFER_H */

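The declarations above suggest a simple usage pattern: allocate a buffer, pull data from a file descriptor in chunks (optionally spilling to a temporary file once max_buffer_size is exceeded), then hand the collected bytes to a callback. The snippet below is only a usage sketch inferred from these prototypes, not code from the patch; example_read, count_cb and the chosen sizes are illustrative.

#include <stdio.h>
#include <unistd.h>

#include "buffer.h"

/* Callback for buffer_iterate(): count the bytes handed back. */
static int count_cb(void *data, void *addr, size_t len)
{
	(void) addr;
	*(size_t *) data += len;
	return 0;
}

/* Drain @fd through a struct buffer, spilling to a temporary file once
 * more than 1 MiB is held in memory, then report how much was read. */
static int example_read(int fd)
{
	struct buffer *b = buffer_alloc(64 * 1024);
	size_t total = 0;
	ssize_t n;

	while ((n = buffer_read_fd(b, fd, 64 * 1024, true, 1024 * 1024)) > 0)
		;
	if (n == 0)
		buffer_iterate(b, count_cb, &total);
	printf("%zu bytes buffered\n", total);
	buffer_free(b, true);		/* dyn=true also frees b itself */
	return n < 0 ? -1 : 0;
}
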
--- /dev/null
|
|
+++ b/dump2tar/include/dref.h
|
|
@@ -0,0 +1,26 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Reference counting for directory handles
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef DREF_H
|
|
+#define DREF_H
|
|
+
|
|
+#include <dirent.h>
|
|
+#include <stdbool.h>
|
|
+
|
|
+/* Multiple jobs may refer to an open DIR * - need reference counting */
|
|
+struct dref {
|
|
+ DIR *dd;
|
|
+ int dirfd;
|
|
+ unsigned int count;
|
|
+};
|
|
+
|
|
+struct dref *dref_create(const char *dirname);
|
|
+struct dref *dref_get(struct dref *dref);
|
|
+void dref_put(struct dref *dref);
|
|
+
|
|
+#endif /* DREF_H */
|
|
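dref.h above is a small reference counter around an open DIR *, so that many queued jobs can share one directory handle and the last dref_put() is the one that closes it. A minimal usage sketch, inferred only from these declarations (the function name and the two-reader scenario are made up):

#include <fcntl.h>
#include <unistd.h>

#include "dref.h"

/* Open one directory handle and let two users share it. */
static void example_dref(const char *dir, const char *file)
{
	struct dref *d = dref_create(dir);	/* count == 1 */
	struct dref *d2;
	int fd;

	if (!d)
		return;

	/* A second user takes its own reference. */
	d2 = dref_get(d);			/* count == 2 */

	/* dirfd allows *at() calls relative to the shared handle. */
	fd = openat(d->dirfd, file, O_RDONLY);
	if (fd >= 0)
		close(fd);

	dref_put(d2);				/* count == 1 */
	dref_put(d);				/* count == 0: closedir() */
}
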
--- /dev/null
|
|
+++ b/dump2tar/include/dump.h
|
|
@@ -0,0 +1,60 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Main dump logic
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef DUMP_H
|
|
+#define DUMP_H
|
|
+
|
|
+#include <stdbool.h>
|
|
+#include <stddef.h>
|
|
+#include <sys/stat.h>
|
|
+
|
|
+#include "strarray.h"
|
|
+
|
|
+#define NUM_EXCLUDE_TYPES 7
|
|
+
|
|
+struct dump_spec {
|
|
+ char *inname;
|
|
+ char *outname;
|
|
+ bool is_cmd;
|
|
+};
|
|
+
|
|
+struct dump_opts {
|
|
+ bool add_cmd_status;
|
|
+ bool append;
|
|
+ bool dereference;
|
|
+ bool exclude_type[NUM_EXCLUDE_TYPES];
|
|
+ bool gzip;
|
|
+ bool ignore_failed_read;
|
|
+ bool no_eof;
|
|
+ bool quiet;
|
|
+ bool recursive;
|
|
+ bool threaded;
|
|
+ bool verbose;
|
|
+ const char *output_file;
|
|
+ int file_timeout;
|
|
+ int timeout;
|
|
+ long jobs;
|
|
+ long jobs_per_cpu;
|
|
+ size_t file_max_size;
|
|
+ size_t max_buffer_size;
|
|
+ size_t max_size;
|
|
+ size_t read_chunk_size;
|
|
+ struct strarray exclude;
|
|
+ struct dump_spec *specs;
|
|
+ unsigned int num_specs;
|
|
+};
|
|
+
|
|
+struct dump_opts *dump_opts_new(void);
|
|
+int dump_opts_set_type_excluded(struct dump_opts *opts, char c);
|
|
+void dump_opts_add_spec(struct dump_opts *opts, char *inname, char *outname,
|
|
+ bool is_cmd);
|
|
+void dump_opts_free(struct dump_opts *opts);
|
|
+
|
|
+int dump_to_tar(struct dump_opts *opts);
|
|
+
|
|
+#endif /* DUMP_H */
|
|
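dump.h is the programmatic entry point: build a struct dump_opts, queue one dump_spec per input (file path or command line), and call dump_to_tar(). The sketch below is assembled from the declarations above and is not necessarily how dump2tar.c drives the API; in particular, passing NULL as outname to keep the original name is an assumption.

#include "dump.h"

/* Archive /proc recursively into out.tar, plus the output of "dmesg". */
static int example_dump(void)
{
	struct dump_opts *opts = dump_opts_new();
	char proc_path[] = "/proc";
	char dmesg_cmd[] = "dmesg";
	char dmesg_out[] = "dmesg.out";
	int rc;

	opts->output_file = "out.tar";
	opts->recursive = true;
	opts->ignore_failed_read = true;

	/* Arguments mirror struct dump_spec: inname, outname, is_cmd */
	dump_opts_add_spec(opts, proc_path, NULL, false);
	dump_opts_add_spec(opts, dmesg_cmd, dmesg_out, true);

	rc = dump_to_tar(opts);
	dump_opts_free(opts);
	return rc;
}
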
--- /dev/null
|
|
+++ b/dump2tar/include/global.h
|
|
@@ -0,0 +1,20 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Global variables
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef GLOBAL_H
|
|
+#define GLOBAL_H
|
|
+
|
|
+#include <stdbool.h>
|
|
+
|
|
+extern bool global_threaded;
|
|
+extern bool global_debug;
|
|
+extern bool global_verbose;
|
|
+extern bool global_quiet;
|
|
+extern bool global_timestamps;
|
|
+
|
|
+#endif /* GLOBAL_H */
|
|
--- /dev/null
|
|
+++ b/dump2tar/include/idcache.h
|
|
@@ -0,0 +1,23 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Caches for user and group ID lookups
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef IDCACHE_H
|
|
+#define IDCACHE_H
|
|
+
|
|
+#include <stdlib.h>
|
|
+#include <sys/types.h>
|
|
+
|
|
+/* Buffer sizes for getpwuid_r and getgid_r calls (bytes) */
|
|
+#define PWD_BUFFER_SIZE 4096
|
|
+#define GRP_BUFFER_SIZE 4096
|
|
+
|
|
+void uid_to_name(uid_t uid, char *name, size_t len);
|
|
+void gid_to_name(gid_t gid, char *name, size_t len);
|
|
+void idcache_cleanup(void);
|
|
+
|
|
+#endif /* IDCACHE_H */
|
|
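idcache.h hides the user and group name lookups behind a cache so that repeated owner resolution while emitting tar headers stays cheap. A small hypothetical caller (main() and the 32-byte name buffers are not from the patch):

#include <stdio.h>
#include <unistd.h>

#include "idcache.h"

int main(void)
{
	char user[32], group[32];

	/* Results are cached, so repeated calls for the same ID are cheap. */
	uid_to_name(getuid(), user, sizeof(user));
	gid_to_name(getgid(), group, sizeof(group));
	printf("%s:%s\n", user, group);

	idcache_cleanup();
	return 0;
}
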
--- /dev/null
|
|
+++ b/dump2tar/include/misc.h
|
|
@@ -0,0 +1,98 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Helper functions
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef MISC_H
|
|
+#define MISC_H
|
|
+
|
|
+#include <stdbool.h>
|
|
+#include <stdlib.h>
|
|
+#include <sys/stat.h>
|
|
+#include <time.h>
|
|
+
|
|
+#include "global.h"
|
|
+
|
|
+#include "util_libc.h"
|
|
+
|
|
+#define MSG_LEN 256
|
|
+
|
|
+#define DBG(...) \
|
|
+ do { \
|
|
+ if (global_debug) \
|
|
+ debug(__FILE__, __LINE__, ##__VA_ARGS__); \
|
|
+ } while (0)
|
|
+
|
|
+#define mwarn(fmt, ...) _mwarn(true, (fmt), ##__VA_ARGS__)
|
|
+#define mwarnx(fmt, ...) _mwarn(false, (fmt), ##__VA_ARGS__)
|
|
+
|
|
+/* Helper macro for constructing messages in variables */
|
|
+#define HANDLE_RC(rc, max, off, label) \
|
|
+ do { \
|
|
+ if ((rc) > 0) \
|
|
+ (off) += (rc); \
|
|
+ if ((off) > (max)) \
|
|
+ goto label; \
|
|
+ } while (0)
|
|
+
|
|
+/* Program exit codes */
|
|
+#define EXIT_OK 0
|
|
+#define EXIT_RUNTIME 1
|
|
+#define EXIT_USAGE 2
|
|
+
|
|
+/* Number of nanoseconds in a second */
|
|
+#define NSEC_PER_SEC 1000000000L
|
|
+#define NSEC_PER_MSEC 1000000L
|
|
+#define NSEC_PER_USEC 1000L
|
|
+
|
|
+extern struct timespec main_start_ts;
|
|
+struct dref;
|
|
+
|
|
+int misc_write_data(int fd, char *addr, size_t len);
|
|
+ssize_t misc_read_data(int fd, char *addr, size_t len);
|
|
+void inc_timespec(struct timespec *ts, time_t sec, long nsec);
|
|
+void set_timespec(struct timespec *ts, time_t sec, long nsec);
|
|
+bool ts_before(struct timespec *a, struct timespec *b);
|
|
+int snprintf_duration(char *buff, size_t len, struct timespec *start,
|
|
+ struct timespec *end);
|
|
+char *get_threadname(void);
|
|
+void debug(const char *file, unsigned long line, const char *format, ...);
|
|
+void _mwarn(bool print_errno, const char *format, ...);
|
|
+void verb(const char *format, ...);
|
|
+void info(const char *format, ...);
|
|
+#define mmalloc(len) util_zalloc(len)
|
|
+#define mcalloc(n, len) util_zalloc((n) * (len))
|
|
+#define mrealloc(ptr, len) util_realloc((ptr), (len))
|
|
+#define mstrdup(str) util_strdup(str)
|
|
+#define masprintf(fmt, ...) __masprintf(__func__, __FILE__, __LINE__, \
|
|
+ (fmt), ##__VA_ARGS__)
|
|
+char *__masprintf(const char *func, const char *file, int line,
|
|
+ const char *fmt, ...);
|
|
+#define set_threadname(fmt, ...) __set_threadname(__func__, __FILE__, \
|
|
+ __LINE__, (fmt), \
|
|
+ ##__VA_ARGS__)
|
|
+void __set_threadname(const char *func, const char *file, int line,
|
|
+ const char *fmt, ...);
|
|
+
|
|
+void clear_threadname(void);
|
|
+void chomp(char *str, char *c);
|
|
+void lchomp(char *str, char *c);
|
|
+void remove_double_slashes(char *str);
|
|
+int stat_file(bool dereference, const char *abs, const char *rel,
|
|
+ struct dref *dref, struct stat *st);
|
|
+void set_dummy_stat(struct stat *st);
|
|
+bool starts_with(const char *str, const char *prefix);
|
|
+bool ends_with(const char *str, const char *suffix);
|
|
+
|
|
+int cmd_child(int fd, char *cmd);
|
|
+int cmd_open(char *cmd, pid_t *pid_ptr);
|
|
+int cmd_close(int fd, pid_t pid, int *status_ptr);
|
|
+
|
|
+void misc_init(void);
|
|
+void misc_cleanup(void);
|
|
+void set_stdout_data(void);
|
|
+
|
|
+#endif /* MISC_H */
|
|
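Most of misc.h is self-explanatory, but HANDLE_RC deserves a short illustration: it accumulates snprintf() return values into an offset and jumps to a label once the message no longer fits. The fragment below is a guess at the intended calling pattern (example_msg and the message text are invented):

#include <stdio.h>

#include "misc.h"

/* Build a multi-part message; bail out via the label once it no longer
 * fits - the pattern the HANDLE_RC comment above refers to. */
static void example_msg(int count, const char *name)
{
	char msg[MSG_LEN];
	size_t off = 0;
	int rc;

	rc = snprintf(msg, MSG_LEN, "%d entries", count);
	HANDLE_RC(rc, MSG_LEN, off, full);
	rc = snprintf(msg + off, MSG_LEN - off, " in %s", name);
	HANDLE_RC(rc, MSG_LEN, off, full);
full:
	info("%s\n", msg);
}
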
--- /dev/null
|
|
+++ b/dump2tar/include/strarray.h
|
|
@@ -0,0 +1,23 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Dynamically growing string arrays
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef STRARRAY_H
|
|
+#define STRARRAY_H
|
|
+
|
|
+/* A string array that can grow in size */
|
|
+struct strarray {
|
|
+ unsigned int num;
|
|
+ char **str;
|
|
+};
|
|
+
|
|
+void free_strarray(struct strarray *array);
|
|
+void add_str_to_strarray(struct strarray *array, const char *str);
|
|
+void add_vstr_to_strarray(struct strarray *array, const char *fmt, ...);
|
|
+int add_file_to_strarray(struct strarray *array, const char *filename);
|
|
+
|
|
+#endif /* STRARRAY_H */
|
|
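strarray.h provides the growing string list used for exclude patterns. Below is a usage sketch based on the four declarations above; that a non-zero return from add_file_to_strarray() means the file could not be read is an assumption, as are the pattern strings and file name.

#include <stdio.h>

#include "strarray.h"

static void example_patterns(void)
{
	struct strarray patterns = { 0 };
	unsigned int i;

	add_str_to_strarray(&patterns, "*.tmp");
	add_vstr_to_strarray(&patterns, "*/%s/*", "tracing");
	/* One pattern per line in the file */
	if (add_file_to_strarray(&patterns, "exclude.lst"))
		fprintf(stderr, "could not read exclude.lst\n");

	for (i = 0; i < patterns.num; i++)
		printf("%s\n", patterns.str[i]);

	free_strarray(&patterns);
}
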
--- /dev/null
|
|
+++ b/dump2tar/include/tar.h
|
|
@@ -0,0 +1,41 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * TAR file generation
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#ifndef TAR_H
|
|
+#define TAR_H
|
|
+
|
|
+#include <stdbool.h>
|
|
+#include <stdlib.h>
|
|
+#include <sys/stat.h>
|
|
+
|
|
+#define TYPE_REGULAR '0'
|
|
+#define TYPE_LINK '2'
|
|
+#define TYPE_DIR '5'
|
|
+
|
|
+#define TAR_BLOCKSIZE 512
|
|
+
|
|
+struct buffer;
|
|
+
|
|
+/* emit_cb_t - Callback used for emitting chunks of a byte stream
|
|
+ * @data: Arbitrary pointer passed via the @data parameter of the
|
|
+ * tar_emit_file_* functions
|
|
+ * @addr: Pointer to data
|
|
+ * @len: Size of data
|
|
+ * Return %0 on success. Returning non-zero will indicate failure and abort
|
|
+ * further data emission. */
|
|
+typedef int (*emit_cb_t)(void *data, void *addr, size_t len);
|
|
+
|
|
+int tar_emit_file_from_buffer(char *filename, char *link, size_t len,
|
|
+ struct stat *stat, char type,
|
|
+ struct buffer *content, emit_cb_t emit_cb,
|
|
+ void *data);
|
|
+int tar_emit_file_from_data(char *filename, char *link, size_t len,
|
|
+ struct stat *stat, char type, void *addr,
|
|
+ emit_cb_t emit_cb, void *data);
|
|
+
|
|
+#endif /* TAR_H */
|
|
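tar.h decouples tar header and content generation from the output channel: the caller supplies an emit callback that receives the raw byte stream. The sketch below is inferred from the prototypes (emit_to_stdout and example_tar are made-up names); it borrows set_dummy_stat() from misc.h to fill the stat structure and ends the archive with the customary two zero blocks.

#include <string.h>
#include <unistd.h>

#include "misc.h"
#include "tar.h"

/* Emit callback: write each chunk of the generated tar stream to stdout. */
static int emit_to_stdout(void *data, void *addr, size_t len)
{
	(void) data;
	return write(STDOUT_FILENO, addr, len) == (ssize_t) len ? 0 : 1;
}

/* Emit one regular file entry followed by the end-of-archive marker. */
static int example_tar(void)
{
	static char content[] = "hello world\n";
	char name[] = "hello.txt";
	char eof[2 * TAR_BLOCKSIZE] = { 0 };
	struct stat st;
	int rc;

	set_dummy_stat(&st);
	rc = tar_emit_file_from_data(name, NULL, strlen(content), &st,
				     TYPE_REGULAR, content,
				     emit_to_stdout, NULL);
	if (rc)
		return rc;
	return emit_to_stdout(NULL, eof, sizeof(eof));
}
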
--- /dev/null
|
|
+++ b/dump2tar/man/Makefile
|
|
@@ -0,0 +1,12 @@
|
|
+# Common definitions
|
|
+include ../../common.mak
|
|
+
|
|
+all:
|
|
+
|
|
+install:
|
|
+ $(INSTALL) -d -m 755 $(DESTDIR)$(MANDIR)/man1
|
|
+ $(INSTALL) -m 644 -c dump2tar.1 $(DESTDIR)$(MANDIR)/man1
|
|
+
|
|
+clean:
|
|
+
|
|
+.PHONY: all clean
|
|
--- /dev/null
|
|
+++ b/dump2tar/man/dump2tar.1
|
|
@@ -0,0 +1,454 @@
|
|
+.\" Macro for inserting an option description prologue.
|
|
+.\" .OD <long> [<short>] [args]
|
|
+.de OD
|
|
+. ds args "
|
|
+. if !'\\$3'' .as args \fI\\$3\fP
|
|
+. if !'\\$4'' .as args \\$4
|
|
+. if !'\\$5'' .as args \fI\\$5\fP
|
|
+. if !'\\$6'' .as args \\$6
|
|
+. if !'\\$7'' .as args \fI\\$7\fP
|
|
+. PD 0
|
|
+. if !'\\$2'' .IP "\fB\-\\$2\fP \\*[args]" 4
|
|
+. if !'\\$1'' .IP "\fB\-\-\\$1\fP \\*[args]" 4
|
|
+. PD
|
|
+..
|
|
+.\" Macro for inserting code line.
|
|
+.\" .CL <text>
|
|
+.de CL
|
|
+. ds pfont \\n[.f]
|
|
+. nh
|
|
+. na
|
|
+. ft CW
|
|
+\\$*
|
|
+. ft \\*[pfont]
|
|
+. ad
|
|
+. hy
|
|
+. br
|
|
+..
|
|
+.\" Macro for inserting a man page reference.
|
|
+.\" .MP man-page section [suffix]
|
|
+.de MP
|
|
+. nh
|
|
+. na
|
|
+. BR \\$1 (\\$2)\\$3
|
|
+. ad
|
|
+. hy
|
|
+..
|
|
+.
|
|
+.TH "dump2tar" "1" "2016\-09\-02" "" ""
|
|
+.
|
|
+.SH "NAME"
|
|
+dump2tar - Gather file contents and command output into a tar archive
|
|
+.
|
|
+.
|
|
+.SH "SYNOPSIS"
|
|
+.B "dump2tar "
|
|
+.RI "[" "OPTIONS" "] " "SPECS"
|
|
+.
|
|
+.
|
|
+.SH "DESCRIPTION"
|
|
+.B dump2tar
|
|
+creates a tar archive from the contents of any files, including files of
|
|
+unknown size.
|
|
+
|
|
+Examples for files of unknown size are:
|
|
+.IP \(bu 3
|
|
+Named pipes (FIFOs)
|
|
+.PP
|
|
+.IP \(bu 3
|
|
+Particular Linux kernel debugfs or sysfs files
|
|
+.PP
|
|
+.IP \(bu 3
|
|
+Character or block devices
|
|
+.PP
|
|
+
|
|
+When adding such a file,
|
|
+.B dump2tar
|
|
+first reads all available data until an end-of-file indication is found. From
|
|
+this data, it then creates a regular file entry in the resulting tar archive.
|
|
+By default, symbolic links and directories are preserved in the archive in
|
|
+their original form.
|
|
+
|
|
+.B dump2tar
|
|
+can also:
|
|
+.IP \(bu 3
|
|
+Add files under a different name
|
|
+.PP
|
|
+.IP \(bu 3
|
|
+Run arbitrary commands and add the resulting command output as a
|
|
+regular file
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.SH "FILE SPECIFICATIONS"
|
|
+.
|
|
+This section describes the format of the
|
|
+.I SPECS
|
|
+argument mentioned in the command synopsis.
|
|
+Use the following command line syntax to identify data sources and
|
|
+to specify file names within the archive:
|
|
+.PP
|
|
+
|
|
+.TP
|
|
+.I "PATH"
|
|
+Adds the contents of the file system subtree at file system location
|
|
+.I PATH
|
|
+(with possible exceptions described by options) in the archive under the same
|
|
+file name as on the file system.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.TP
|
|
+.IR "FILENAME" ":=" "PATH"
|
|
+Adds the contents of the file at file system location
|
|
+.I PATH
|
|
+in the archive under the name specified by
|
|
+.IR FILENAME .
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.TP
|
|
+.IR "FILENAME" "|=" "CMDLINE"
|
|
+Runs the command
|
|
+.IR CMDLINE
|
|
+and captures both the resulting standard output and standard error streams.
|
|
+Adds the collected output as a regular file named
|
|
+.I FILENAME
|
|
+in the resulting archive. You can also include the resulting program exit code
|
|
+by using option \-\-add\-cmd\-status.
|
|
+.PP
|
|
+.
|
|
+You can also specify "\-\-". All specifications that follow are interpreted as
|
|
+simple file names. This is useful for archiving files that contain ":=" or "|=".
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.SH "OUTPUT OPTIONS"
|
|
+.
|
|
+.OD "output\-file" "o" "TARFILE"
|
|
+Writes the resulting tar archive to
|
|
+.IR TARFILE .
|
|
+An existing file at the specified file system location is overwritten.
|
|
+
|
|
+If this option is omitted or if "\-" is specified for
|
|
+.IR TARFILE ,
|
|
+the archive is written to the standard output stream.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "gzip" "z" ""
|
|
+Compresses the resulting tar archive using gzip.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "max\-size" "m" "VALUE"
|
|
+Sets an upper size limit, in bytes, for the resulting archive. If this limit
|
|
+is exceeded after adding a file, no further files are added.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "timeout" "t" "VALUE"
|
|
+Sets an upper time limit, in seconds, for the archiving process. If this limit
|
|
+is exceeded while adding a file, that file is truncated and no
|
|
+further files are added.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "no-eof" "" ""
|
|
+Does not write an end-of-file marker.
|
|
+
|
|
+Use this option if you want to create an archive that can be extended by
|
|
+appending additional tar archive data.
|
|
+
|
|
+Note: Do not use this option for the final data to be added.
|
|
+A valid tar archive requires a trailing end-of-file marker.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "append" "" ""
|
|
+Appends data to the end of the archive.
|
|
+
|
|
+Use this option to incrementally build a tar file by repeatedly calling
|
|
+.BR dump2tar .
|
|
+You must specify the \-\-no\-eof option for each but the final call of
|
|
+.BR dump2tar .
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "add-cmd-status" "" ""
|
|
+Adds a separate file named
|
|
+.RI \(dq FILENAME .cmdstatus\(dq
|
|
+for each command output added through the
|
|
+.RI \(dq FILENAME |= CMDLINE \(dq
|
|
+notation (see FILE SPECIFICATIONS).
|
|
+This file contains information about the exit status of the
|
|
+process that executed the command:
|
|
+.
|
|
+.RS 8
|
|
+.TP
|
|
+.RI EXITSTATUS= VALUE
|
|
+Unless
|
|
+.I VALUE
|
|
+is -1, the process ended normally with the specified exit value.
|
|
+.PP
|
|
+.
|
|
+.TP
|
|
+.RI TERMSIG= VALUE
|
|
+Unless
|
|
+.I VALUE
|
|
+is -1, the process was stopped by a signal of the specified number.
|
|
+.PP
|
|
+.
|
|
+.TP
|
|
+.RI WAITPID_ERRNO= VALUE
|
|
+Unless
|
|
+.I VALUE
|
|
+is -1, an attempt to obtain the status of the process failed with the
|
|
+specified error.
|
|
+.PP
|
|
+.RE
|
|
+.
|
|
+.
|
|
+.
|
|
+.SH "INPUT OPTIONS"
|
|
+.
|
|
+.OD "files\-from" "F" "FILENAME"
|
|
+Reads input data specifications (see FILE SPECIFICATIONS) from
|
|
+.IR FILENAME ,
|
|
+one specification per line. Each line contains either a file name or a
|
|
+.IR FILENAME := PATH
|
|
+or
|
|
+.IR FILENAME |= CMDLINE
|
|
+specification. Empty lines are ignored.
|
|
+
|
|
+A line can also consist of only "\-\-". All lines following this specification
|
|
+are interpreted as simple file names. This is useful for archiving files that
|
|
+contain ":=" or "|=".
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "ignore\-failed\-read" "i" ""
|
|
+Continues after read errors.
|
|
+
|
|
+By default,
|
|
+.B dump2tar
|
|
+stops processing after encountering errors while reading an input file.
|
|
+With this option,
|
|
+.B dump2tar
|
|
+prints a warning message and adds an empty entry for the erroneous file in
|
|
+the archive.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "buffer\-size" "b" "VALUE"
|
|
+Reads data from input files in chunks of
|
|
+.I VALUE
|
|
+bytes. Large values can accelerate the archiving process for large files
|
|
+at the cost of increased memory usage. The default value is 1048576.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "file\-timeout" "T" "VALUE"
|
|
+Sets an upper time limit, in seconds, for reading an input file.
|
|
+
|
|
+.B dump2tar
|
|
+stops processing a file when the time limit is exceeded. Archive entries for
|
|
+such files are truncated to the amount of data that is collected by the time
|
|
+the limit is reached.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "file\-max\-size" "M" "N"
|
|
+Sets an upper size limit, in bytes, for an input file.
|
|
+
|
|
+.B dump2tar
|
|
+stops processing a file when the size limit is exceeded. Archive entries for
|
|
+such files are truncated to the specified size.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "jobs" "j" "N"
|
|
+By default,
|
|
+.B dump2tar
|
|
+processes one file at a time. With this option,
|
|
+.B dump2tar
|
|
+processes
|
|
+.I N
|
|
+files in parallel.
|
|
+
|
|
+Parallel processing can accelerate the archiving process,
|
|
+especially if input files are located on slow devices, or when output from
|
|
+multiple commands is added to the archive.
|
|
+
|
|
+Note: Use
|
|
+.B tar
|
|
+option \-\-delay\-directory\-restore when extracting files from an archive
|
|
+created with \-\-jobs to prevent conflicts with directory permissions and
|
|
+modification times.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "jobs\-per\-cpu" "J" "N"
|
|
+Processes
|
|
+.I N
|
|
+files for each online CPU in parallel.
|
|
+
|
|
+Parallel processing can accelerate the
|
|
+archiving process, especially if input files are located on slow devices, or
|
|
+when output from multiple commands is added to the archive.
|
|
+
|
|
+Note: Use
|
|
+.B tar
|
|
+option \-\-delay\-directory\-restore when extracting files from an archive
|
|
+created with \-\-jobs\-per\-cpu to prevent conflicts with directory permissions
|
|
+and modification times.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "exclude" "x" "PATTERN"
|
|
+Does not add files to the archive if their file names match
|
|
+.IR PATTERN .
|
|
+.I PATTERN
|
|
+is an expression that uses the shell wildcards.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "exclude\-from" "X" "FILENAME"
|
|
+Does not add files to the archive if their names match at least one of the
|
|
+patterns listed in the pattern file with name
|
|
+.IR FILENAME .
|
|
+In the pattern file, each line specifies an expression that uses the
|
|
+shell wildcards.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "exclude\-type" "" "TYPE"
|
|
+Does not add files to the archive if they match at least one of the file types
|
|
+specified with
|
|
+.IR TYPE .
|
|
+.I TYPE
|
|
+uses one or more of the characters "fdcbpls", where:
|
|
+
|
|
+.RS 8
|
|
+.IP f 3
|
|
+regular files
|
|
+.PP
|
|
+.IP d 3
|
|
+directories
|
|
+.PP
|
|
+.IP c 3
|
|
+character devices
|
|
+.PP
|
|
+.IP b 3
|
|
+block devices
|
|
+.PP
|
|
+.IP p 3
|
|
+named pipes (FIFOs)
|
|
+.PP
|
|
+.IP l 3
|
|
+symbolic links
|
|
+.PP
|
|
+.IP s 3
|
|
+sockets
|
|
+.PP
|
|
+.RE
|
|
+.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "dereference" "" ""
|
|
+Adds the content of link targets instead of symbolic links.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "no\-recursion" "" ""
|
|
+Does not add files from sub\-directories.
|
|
+
|
|
+By default,
|
|
+.B dump2tar
|
|
+adds archive entries for specified directories, and for the files within these
|
|
+directories. With this option, a specified directory results in a single entry
|
|
+for the directory. Any contained files to be included must be specified
|
|
+explicitly.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.SH "MISC OPTIONS"
|
|
+.
|
|
+.OD "help" "h" ""
|
|
+Prints an overview of available options, then exits.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "verbose" "V" ""
|
|
+Prints additional informational output.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.OD "quiet" "q" ""
|
|
+Suppresses printing of informational output.
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.
|
|
+.SH "EXAMPLES"
|
|
+.
|
|
+.\fB
|
|
+.CL # dump2tar a b \-o archive.tar
|
|
+.\fR
|
|
+
|
|
+.RS 4
|
|
+Creates a tar archive named archive.tar containing files a and b.
|
|
+.RE
|
|
+.PP
|
|
+.
|
|
+.\fB
|
|
+.CL # dump2tar /proc \-o procdump.tar.gz \-z \-i \-T 1 \-M 1048576
|
|
+.\fR
|
|
+
|
|
+.RS 4
|
|
+Creates a gzip compressed tar archive named procdump.tar.gz that contains
|
|
+all procfs files. Unreadable files are ignored. Files are truncated when the
|
|
+first of the two limiting conditions is reached, either 1048576 bytes of
|
|
+content or the reading time of 1 second.
|
|
+.RE
|
|
+.PP
|
|
+.
|
|
+.\fB
|
|
+.CL # dump2tar '|=dmesg' '|=lspci' \-o data.tar
|
|
+.\fR
|
|
+
|
|
+.RS 4
|
|
+Creates a tar archive named data.tar containing the output of the 'dmesg'
|
|
+and 'lspci' commands.
|
|
+.RE
|
|
+.PP
|
|
+.
|
|
+.\fB
|
|
+.CL # dump2tar /sys/kernel/debug/ -x '*/tracing/*' -o debug.tar -i
|
|
+.\fR
|
|
+
|
|
+.RS 4
|
|
+Creates a tar archive named debug.tar containing the contents of directory
|
|
+/sys/kernel/debug/ while excluding any file that is located in a sub-directory
|
|
+named 'tracing'.
|
|
+.RE
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.SH "EXIT CODES"
|
|
+.TP
|
|
+.B 0
|
|
+The program finished successfully
|
|
+.TP
|
|
+.B 1
|
|
+A run-time error occurred
|
|
+.TP
|
|
+.B 2
|
|
+The specified command was not valid
|
|
+.PP
|
|
+.
|
|
+.
|
|
+.SH "SEE ALSO"
|
|
+.MP dump2tar 1 ,
|
|
+.MP tar 1
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/Makefile
|
|
@@ -0,0 +1,36 @@
|
|
+# Common definitions
|
|
+include ../../common.mak
|
|
+
|
|
+CPPFLAGS += -I ../../include -I../include -std=gnu99 -Wno-unused-parameter
|
|
+LDLIBS += -lpthread -lrt
|
|
+ifneq ($(HAVE_ZLIB),0)
|
|
+CPPFLAGS += -DHAVE_ZLIB
|
|
+LDLIBS += -lz
|
|
+endif
|
|
+
|
|
+core_objects = buffer.o dref.o global.o dump.o idcache.o misc.o strarray.o tar.o
|
|
+libs = $(rootdir)/libutil/util_libc.o $(rootdir)/libutil/util_opt.o \
|
|
+ $(rootdir)/libutil/util_prg.o $(rootdir)/libutil/util_panic.o
|
|
+
|
|
+check_dep_zlib:
|
|
+ $(call check_dep, \
|
|
+ "dump2tar", \
|
|
+ "zlib.h", \
|
|
+ "zlib-devel", \
|
|
+ "HAVE_ZLIB=0")
|
|
+
|
|
+all: check_dep_zlib dump2tar
|
|
+
|
|
+dump2tar: $(core_objects) dump2tar.o $(libs)
|
|
+
|
|
+install: dump2tar
|
|
+ $(INSTALL) -c dump2tar $(DESTDIR)$(USRBINDIR)
|
|
+
|
|
+clean:
|
|
+ @rm -f dump2tar *.o
|
|
+
|
|
+.PHONY: all install clean
|
|
+
|
|
+# Additional manual dependencies
|
|
+../../libutil/%.o:
|
|
+ make -C ../../libutil $<
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/buffer.c
|
|
@@ -0,0 +1,271 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Data buffering functions
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#include "buffer.h"
|
|
+
|
|
+#include <stdio.h>
|
|
+#include <stdlib.h>
|
|
+#include <string.h>
|
|
+#include <sys/types.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+#include "misc.h"
|
|
+
|
|
+void buffer_print(struct buffer *buffer)
|
|
+{
|
|
+ fprintf(stderr, "DEBUG: buffer at %p\n", (void *) buffer);
|
|
+ if (!buffer)
|
|
+ return;
|
|
+ fprintf(stderr, "DEBUG: total=%zu\n", buffer->total);
|
|
+ fprintf(stderr, "DEBUG: off=%zu\n", buffer->off);
|
|
+ fprintf(stderr, "DEBUG: size=%zu\n", buffer->size);
|
|
+ fprintf(stderr, "DEBUG: addr=%p\n", (void *) buffer->addr);
|
|
+ fprintf(stderr, "DEBUG: fd_open=%d\n", buffer->fd_open);
|
|
+ fprintf(stderr, "DEBUG: fd=%d\n", buffer->fd);
|
|
+ if (buffer->fd_open) {
|
|
+ fprintf(stderr, "DEBUG: fd->pos=%zu\n",
|
|
+ lseek(buffer->fd, 0, SEEK_CUR));
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Initialize @buffer to hold @size bytes in memory */
|
|
+void buffer_init(struct buffer *buffer, size_t size)
|
|
+{
|
|
+ memset(buffer, 0, sizeof(struct buffer));
|
|
+ buffer->addr = mmalloc(size);
|
|
+ buffer->size = size;
|
|
+}
|
|
+
|
|
+/* Allocate a new buffer for holding @size bytes in memory */
|
|
+struct buffer *buffer_alloc(size_t size)
|
|
+{
|
|
+ struct buffer *buffer;
|
|
+
|
|
+ buffer = mmalloc(sizeof(struct buffer));
|
|
+ buffer_init(buffer, size);
|
|
+
|
|
+ return buffer;
|
|
+}
|
|
+
|
|
+/* Forget about any data stored in @buffer */
|
|
+void buffer_reset(struct buffer *buffer)
|
|
+{
|
|
+ buffer->total = 0;
|
|
+ buffer->off = 0;
|
|
+ if (buffer->fd_open) {
|
|
+ if (ftruncate(buffer->fd, 0))
|
|
+ mwarn("Cannot truncate temporary file");
|
|
+ if (lseek(buffer->fd, 0, SEEK_SET) == (off_t) -1)
|
|
+ mwarn("Cannot seek in temporary file");
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Close buffer file associated with @buffer */
|
|
+void buffer_close(struct buffer *buffer)
|
|
+{
|
|
+ if (!buffer->fd_open)
|
|
+ return;
|
|
+
|
|
+ fclose(buffer->file);
|
|
+ buffer->fd = 0;
|
|
+ buffer->fd_open = false;
|
|
+}
|
|
+
|
|
+/* Release all resources associated with @buffer. If @dyn is %true, also free
|
|
+ * @buffer itself. */
|
|
+void buffer_free(struct buffer *buffer, bool dyn)
|
|
+{
|
|
+ if (!buffer)
|
|
+ return;
|
|
+ buffer_reset(buffer);
|
|
+ buffer_close(buffer);
|
|
+ free(buffer->addr);
|
|
+ if (dyn)
|
|
+ free(buffer);
|
|
+}
|
|
+
|
|
+/* Open a buffer file for @buffer. Return %EXIT_OK on success, %EXIT_RUNTIME
|
|
+ * otherwise. */
|
|
+int buffer_open(struct buffer *buffer)
|
|
+{
|
|
+ if (buffer->fd_open)
|
|
+ return EXIT_OK;
|
|
+
|
|
+ buffer->file = tmpfile();
|
|
+ if (!buffer->file) {
|
|
+ mwarn("Could not create temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+
|
|
+ buffer->fd = fileno(buffer->file);
|
|
+ buffer->fd_open = true;
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Write data in memory of @buffer to buffer file. Return %EXIT_OK on success,
|
|
+ * %EXIT_RUNTIME otherwise. */
|
|
+int buffer_flush(struct buffer *buffer)
|
|
+{
|
|
+ if (buffer->off == 0)
|
|
+ return EXIT_OK;
|
|
+ if (buffer_open(buffer))
|
|
+ return EXIT_RUNTIME;
|
|
+ if (misc_write_data(buffer->fd, buffer->addr, buffer->off)) {
|
|
+ mwarn("Could not write to temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+ buffer->off = 0;
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Try to ensure that at least @size bytes are available at
|
|
+ * @buffer->addr[buffer->off]. Return the actual number of bytes available or
|
|
+ * @-1 on error. If @usefile is %true, make use of a buffer file if
|
|
+ * the total buffer size exceeds @max_buffer_size. */
|
|
+ssize_t buffer_make_room(struct buffer *buffer, size_t size, bool usefile,
|
|
+ size_t max_buffer_size)
|
|
+{
|
|
+ size_t needsize;
|
|
+
|
|
+ if (size > max_buffer_size && usefile)
|
|
+ size = max_buffer_size;
|
|
+
|
|
+ needsize = buffer->off + size;
|
|
+ if (needsize <= buffer->size) {
|
|
+ /* Room available */
|
|
+ return size;
|
|
+ }
|
|
+
|
|
+ if (needsize > max_buffer_size && usefile) {
|
|
+ /* Need to write out memory buffer to buffer file */
|
|
+ if (buffer_flush(buffer))
|
|
+ return -1;
|
|
+ if (size <= buffer->size)
|
|
+ return size;
|
|
+ needsize = size;
|
|
+ }
|
|
+
|
|
+ /* Need to increase memory buffer size */
|
|
+ buffer->size = needsize;
|
|
+ buffer->addr = mrealloc(buffer->addr, buffer->size);
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+/* Try to read @chunk bytes from @fd to @buffer. Return the number of bytes
|
|
+ * read on success, %0 on EOF or %-1 on error. */
|
|
+ssize_t buffer_read_fd(struct buffer *buffer, int fd, size_t chunk,
|
|
+ bool usefile, size_t max_buffer_size)
|
|
+{
|
|
+ ssize_t c = buffer_make_room(buffer, chunk, usefile, max_buffer_size);
|
|
+
|
|
+ DBG("buffer_read_fd wanted %zd got %zd", chunk, c);
|
|
+ if (c < 0)
|
|
+ return c;
|
|
+
|
|
+ c = read(fd, buffer->addr + buffer->off, c);
|
|
+ if (c > 0) {
|
|
+ buffer->total += c;
|
|
+ buffer->off += c;
|
|
+ }
|
|
+
|
|
+ return c;
|
|
+}
|
|
+
|
|
+/* Add @len bytes at @addr to @buffer. If @addr is %NULL, add zeroes. Return
|
|
+ * %EXIT_OK on success, %EXIT_RUNTIME otherwise. */
|
|
+int buffer_add_data(struct buffer *buffer, char *addr, size_t len, bool usefile,
|
|
+ size_t max_buffer_size)
|
|
+{
|
|
+ ssize_t c;
|
|
+
|
|
+ while (len > 0) {
|
|
+ c = buffer_make_room(buffer, len, usefile, max_buffer_size);
|
|
+ if (c < 0)
|
|
+ return EXIT_RUNTIME;
|
|
+ if (addr) {
|
|
+ memcpy(buffer->addr + buffer->off, addr, c);
|
|
+ addr += c;
|
|
+ } else {
|
|
+ memset(buffer->addr + buffer->off, 0, c);
|
|
+ }
|
|
+ buffer->total += c;
|
|
+ buffer->off += c;
|
|
+
|
|
+ len -= c;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Call @cb for all chunks of data in @buffer. @data is passed to @cb. */
|
|
+int buffer_iterate(struct buffer *buffer, buffer_cb_t cb, void *data)
|
|
+{
|
|
+ int rc;
|
|
+ ssize_t r;
|
|
+
|
|
+ if (buffer->total == 0)
|
|
+ return EXIT_OK;
|
|
+
|
|
+ if (!buffer->fd_open)
|
|
+ return cb(data, buffer->addr, buffer->off);
|
|
+
|
|
+ /* Free memory buffer to be used as copy buffer */
|
|
+ if (buffer_flush(buffer))
|
|
+ return EXIT_RUNTIME;
|
|
+ if (lseek(buffer->fd, 0, SEEK_SET) == (off_t) -1) {
|
|
+ mwarn("Cannot seek in temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+
|
|
+ /* Copy data from temporary file to target file */
|
|
+ while ((r = misc_read_data(buffer->fd, buffer->addr,
|
|
+ buffer->size)) != 0) {
|
|
+ if (r < 0) {
|
|
+ mwarn("Cannot read from temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+ rc = cb(data, buffer->addr, r);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Truncate @buffer to at most @len bytes */
|
|
+int buffer_truncate(struct buffer *buffer, size_t len)
|
|
+{
|
|
+ size_t delta;
|
|
+
|
|
+ if (buffer->total <= len)
|
|
+ return EXIT_OK;
|
|
+
|
|
+ delta = buffer->total - len;
|
|
+
|
|
+ buffer->total = len;
|
|
+ if (buffer->fd_open && delta > buffer->off) {
|
|
+ /* All of memory and some of file buffer is truncated */
|
|
+ buffer->off = 0;
|
|
+ if (ftruncate(buffer->fd, len)) {
|
|
+ mwarn("Cannot truncate temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+ if (lseek(buffer->fd, len, SEEK_SET) == (off_t) -1) {
|
|
+ mwarn("Cannot seek in temporary file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+ } else {
|
|
+ /* Only memory buffer is truncated */
|
|
+ buffer->off -= delta;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/dref.c
|
|
@@ -0,0 +1,92 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Reference counting for directory handles
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#include "dref.h"
|
|
+
|
|
+#include <dirent.h>
|
|
+#include <pthread.h>
|
|
+#include <sys/types.h>
|
|
+
|
|
+#include "global.h"
|
|
+#include "misc.h"
|
|
+
|
|
+/* dref_mutex serializes access to drefs */
|
|
+static pthread_mutex_t dref_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
+
|
|
+static unsigned long num_open_dirs;
|
|
+static unsigned long num_open_dirs_max;
|
|
+
|
|
+/* Lock dref mutex */
|
|
+static void dref_lock(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_lock(&dref_mutex);
|
|
+}
|
|
+
|
|
+/* Unlock dref mutex */
|
|
+static void dref_unlock(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_unlock(&dref_mutex);
|
|
+}
|
|
+
|
|
+/* Create a reference count managed directory handle for @dirname */
|
|
+struct dref *dref_create(const char *dirname)
|
|
+{
|
|
+ struct dref *dref;
|
|
+ DIR *dd;
|
|
+
|
|
+ dd = opendir(dirname);
|
|
+ DBG("opendir(%s)=%p (total=%lu)", dirname, dd, ++num_open_dirs);
|
|
+ if (!dd) {
|
|
+ num_open_dirs--;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (num_open_dirs > num_open_dirs_max)
|
|
+ num_open_dirs_max = num_open_dirs;
|
|
+
|
|
+ dref = mmalloc(sizeof(struct dref));
|
|
+ dref->dd = dd;
|
|
+ dref->dirfd = dirfd(dd);
|
|
+ dref->count = 1;
|
|
+
|
|
+ return dref;
|
|
+}
|
|
+
|
|
+/* Obtain a reference to @dref */
|
|
+struct dref *dref_get(struct dref *dref)
|
|
+{
|
|
+ if (dref) {
|
|
+ dref_lock();
|
|
+ dref->count++;
|
|
+ dref_unlock();
|
|
+ }
|
|
+
|
|
+ return dref;
|
|
+}
|
|
+
|
|
+/* Release a reference to @dref. If this was the last reference, lose the
|
|
+ * associated directory handle and free @dref. */
|
|
+void dref_put(struct dref *dref)
|
|
+{
|
|
+ if (dref) {
|
|
+ dref_lock();
|
|
+ dref->count--;
|
|
+ if (dref->count == 0) {
|
|
+ num_open_dirs--;
|
|
+ DBG("closedir(%p) (total=%lu, max=%lu)", dref->dd,
|
|
+ num_open_dirs, num_open_dirs_max);
|
|
+ closedir(dref->dd);
|
|
+ free(dref);
|
|
+ }
|
|
+ dref_unlock();
|
|
+ }
|
|
+}
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/dump.c
|
|
@@ -0,0 +1,1850 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Main dump logic
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#include "dump.h"
|
|
+
|
|
+#include <errno.h>
|
|
+#include <fcntl.h>
|
|
+#include <fnmatch.h>
|
|
+#include <getopt.h>
|
|
+#include <pthread.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdio.h>
|
|
+#include <string.h>
|
|
+#include <sys/stat.h>
|
|
+#include <sys/types.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+#ifdef HAVE_ZLIB
|
|
+#include <zlib.h>
|
|
+#endif /* HAVE_ZLIB */
|
|
+
|
|
+#include "buffer.h"
|
|
+#include "dref.h"
|
|
+#include "global.h"
|
|
+#include "idcache.h"
|
|
+#include "misc.h"
|
|
+#include "tar.h"
|
|
+
|
|
+/* Default input file read size (bytes) */
|
|
+#define DEFAULT_READ_CHUNK_SIZE (512 * 1024)
|
|
+#define DEFAULT_MAX_BUFFER_SIZE (2 * 1024 * 1024)
|
|
+
|
|
+#define _SET_ABORTED(task) _set_aborted((task), __func__, __LINE__)
|
|
+#define SET_ABORTED(task) set_aborted((task), __func__, __LINE__)
|
|
+
|
|
+#define read_error(task, filename, fmt, ...) \
|
|
+ do { \
|
|
+ if (!(task)->opts->ignore_failed_read) \
|
|
+ SET_ABORTED((task)); \
|
|
+ _mwarn(true, "%s: " fmt, (filename), ##__VA_ARGS__); \
|
|
+ } while (0)
|
|
+
|
|
+#define write_error(task, fmt, ...) \
|
|
+ do { \
|
|
+ SET_ABORTED((task)); \
|
|
+ _mwarn(true, "%s: " fmt, (task)->opts->output_file, \
|
|
+ ##__VA_ARGS__); \
|
|
+ } while (0)
|
|
+
|
|
+#define tverb(fmt, ...) \
|
|
+ do { \
|
|
+ if (task->opts->verbose) \
|
|
+ verb((fmt), ##__VA_ARGS__); \
|
|
+ } while (0)
|
|
+
|
|
+
|
|
+/* Jobs representing a file or command output to add */
|
|
+struct job {
|
|
+ struct job *next_job;
|
|
+ enum job_type {
|
|
+ JOB_INIT, /* Initialization work */
|
|
+ JOB_FILE, /* Add a regular file */
|
|
+ JOB_LINK, /* Add a symbolic link */
|
|
+ JOB_DIR, /* Add a directory */
|
|
+ JOB_CMD, /* Add command output */
|
|
+ } type;
|
|
+ enum job_status {
|
|
+ JOB_QUEUED, /* Transient: Job processing has not started */
|
|
+ JOB_IN_PROGRESS,/* Transient: Job processing has started */
|
|
+ JOB_EXCLUDED, /* Final: File was excluded */
|
|
+ JOB_FAILED, /* Final: Data could not be obtained */
|
|
+ JOB_DONE, /* Final: All data was obtained */
|
|
+ JOB_PARTIAL, /* Final: Only some data was obtained */
|
|
+ } status;
|
|
+ char *outname;
|
|
+ char *inname;
|
|
+ char *relname;
|
|
+ struct stat stat;
|
|
+ bool timed;
|
|
+ struct timespec deadline;
|
|
+ struct dref *dref;
|
|
+ int cmd_status;
|
|
+ struct buffer *content;
|
|
+};
|
|
+
|
|
+/* Run-time statistics */
|
|
+struct stats {
|
|
+ unsigned long num_done;
|
|
+ unsigned long num_excluded;
|
|
+ unsigned long num_failed;
|
|
+ unsigned long num_partial;
|
|
+};
|
|
+
|
|
+/* Information specific to a single dump task */
|
|
+struct task {
|
|
+ /* Input */
|
|
+ struct dump_opts *opts;
|
|
+
|
|
+ /* State */
|
|
+
|
|
+ /* mutex serializes access to global data */
|
|
+ pthread_mutex_t mutex;
|
|
+ pthread_cond_t worker_cond;
|
|
+ pthread_cond_t cond;
|
|
+ unsigned long num_jobs_active;
|
|
+ struct job *jobs_head;
|
|
+ struct job *jobs_tail;
|
|
+ bool aborted;
|
|
+
|
|
+ /* output_mutex serializes access to output file */
|
|
+ pthread_mutex_t output_mutex;
|
|
+ int output_fd;
|
|
+ size_t output_written;
|
|
+#ifdef HAVE_ZLIB
|
|
+ gzFile output_gzfd;
|
|
+#endif /* HAVE_ZLIB */
|
|
+ unsigned long output_num_files;
|
|
+
|
|
+ /* No protection needed (only accessed in single-threaded mode) */
|
|
+ struct stats stats;
|
|
+ struct timespec start_ts;
|
|
+};
|
|
+
|
|
+/* Per thread management data */
|
|
+struct per_thread {
|
|
+ long num;
|
|
+ pthread_t thread;
|
|
+ bool running;
|
|
+ bool timed_out;
|
|
+ struct stats stats;
|
|
+ struct job *job;
|
|
+ struct buffer buffer;
|
|
+ struct task *task;
|
|
+};
|
|
+
|
|
+static const struct {
|
|
+ mode_t mode;
|
|
+ char c;
|
|
+} exclude_types[NUM_EXCLUDE_TYPES] = {
|
|
+ { S_IFREG, 'f' },
|
|
+ { S_IFDIR, 'd' },
|
|
+ { S_IFCHR, 'c' },
|
|
+ { S_IFBLK, 'b' },
|
|
+ { S_IFIFO, 'p' },
|
|
+ { S_IFLNK, 'l' },
|
|
+ { S_IFSOCK, 's' },
|
|
+};
|
|
+
|
|
+/* Lock main mutex */
|
|
+static void main_lock(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ DBG("main lock");
|
|
+ pthread_mutex_lock(&task->mutex);
|
|
+}
|
|
+
|
|
+/* Unlock main mutex */
|
|
+static void main_unlock(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ DBG("main unlock");
|
|
+ pthread_mutex_unlock(&task->mutex);
|
|
+}
|
|
+
|
|
+/* Lock output mutex */
|
|
+static void output_lock(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_lock(&task->output_mutex);
|
|
+}
|
|
+
|
|
+/* Unlock output mutex */
|
|
+static void output_unlock(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_unlock(&task->output_mutex);
|
|
+}
|
|
+
|
|
+/* Wake up all waiting workers */
|
|
+static void _worker_wakeup_all(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ DBG("waking up all worker threads");
|
|
+ pthread_cond_broadcast(&task->worker_cond);
|
|
+}
|
|
+
|
|
+/* Wake up one waiting worker */
|
|
+static void _worker_wakeup_one(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ DBG("waking up one worker thread");
|
|
+ pthread_cond_signal(&task->worker_cond);
|
|
+}
|
|
+
|
|
+/* Wait for a signal to a worker */
|
|
+static int _worker_wait(struct task *task)
|
|
+{
|
|
+ int rc;
|
|
+
|
|
+ DBG("waiting for signal to worker");
|
|
+ rc = pthread_cond_wait(&task->worker_cond, &task->mutex);
|
|
+ DBG("waiting for signal to worker done (rc=%d)", rc);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Wake up main thread */
|
|
+static void _main_wakeup(struct task *task)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ DBG("waking up main thread");
|
|
+ pthread_cond_broadcast(&task->cond);
|
|
+}
|
|
+
|
|
+/* Wait for a signal to the main thread */
|
|
+static int _main_wait(struct task *task)
|
|
+{
|
|
+ int rc;
|
|
+
|
|
+ DBG("waiting for status change");
|
|
+ rc = pthread_cond_wait(&task->cond, &task->mutex);
|
|
+ DBG("waiting for status change done (rc=%d)", rc);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Wait for a signal to the main thread. Abort waiting after @deadline */
|
|
+static int _main_wait_timed(struct task *task, struct timespec *deadline)
|
|
+{
|
|
+ int rc;
|
|
+
|
|
+ DBG("timed waiting for status change");
|
|
+ rc = pthread_cond_timedwait(&task->cond, &task->mutex, deadline);
|
|
+ DBG("timed waiting for status change done (rc=%d)", rc);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Allow thread to be canceled */
|
|
+static void cancel_enable(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ if (pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
|
|
+ mwarn("pthread_setcancelstate");
|
|
+}
|
|
+
|
|
+/* Prevent thread from being canceled */
|
|
+static void cancel_disable(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ if (pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL) != 0)
|
|
+ mwarn("pthread_setcancelstate");
|
|
+}
|
|
+
|
|
+/* Abort processing and inform all threads to shutdown. Must be called with
|
|
+ * task->mutex locked */
|
|
+static void _set_aborted(struct task *task, const char *func, unsigned int line)
|
|
+{
|
|
+ DBG("set aborted at %s:%u", func, line);
|
|
+ task->aborted = true;
|
|
+ _worker_wakeup_all(task);
|
|
+ _main_wakeup(task);
|
|
+}
|
|
+
|
|
+/* Abort processing and inform all threads to shutdown */
|
|
+static void set_aborted(struct task *task, const char *func, unsigned int line)
|
|
+{
|
|
+ main_lock(task);
|
|
+ _set_aborted(task, func, line);
|
|
+ main_unlock(task);
|
|
+}
|
|
+
|
|
+/* Check if abort processing has been initiated */
|
|
+static bool is_aborted(struct task *task)
|
|
+{
|
|
+ bool result;
|
|
+
|
|
+ main_lock(task);
|
|
+ result = task->aborted;
|
|
+ main_unlock(task);
|
|
+
|
|
+ return result;
|
|
+}
|
|
+
|
|
+/* Release resources associated with @job */
|
|
+static void free_job(struct task *task, struct job *job)
|
|
+{
|
|
+ DBG("free job %p (%s)", job, job->inname);
|
|
+ if (!job)
|
|
+ return;
|
|
+ free(job->inname);
|
|
+ free(job->outname);
|
|
+ free(job->relname);
|
|
+ dref_put(job->dref);
|
|
+ free(job);
|
|
+}
|
|
+
|
|
+/* Check if file type specified by mode @m was marked as excluded */
|
|
+static bool is_type_excluded(struct dump_opts *opts, mode_t m)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ m &= S_IFMT;
|
|
+ for (i = 0; i < NUM_EXCLUDE_TYPES; i++) {
|
|
+ if (exclude_types[i].mode == m)
|
|
+ return opts->exclude_type[i];
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/* Replace all '/' characters in @filename with '_' */
|
|
+static void escape_filename(char *filename)
|
|
+{
|
|
+ for (; *filename; filename++) {
|
|
+ if (*filename == '/')
|
|
+ *filename = '_';
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Determine filename in archive from original filename @inname and
|
|
+ * requested new filename @outname and depending on @type. */
|
|
+static void set_outname(char **result_ptr, const char *outname,
|
|
+ const char *inname, enum job_type type)
|
|
+{
|
|
+ const char *prefix = "", *name, *suffix;
|
|
+ char *result, *end;
|
|
+ size_t olen = outname ? strlen(outname) : 0, plen, nlen;
|
|
+
|
|
+ if (olen == 0) {
|
|
+ /* No output name specified: outname = inname */
|
|
+ name = inname;
|
|
+ } else if (outname[olen - 1] == '/') {
|
|
+ /* Output name is a directory: outname = outname/inname */
|
|
+ prefix = outname;
|
|
+ name = inname;
|
|
+ } else {
|
|
+ /* Output name is a filename: outname = inname */
|
|
+ name = outname;
|
|
+ }
|
|
+
|
|
+ if (type == JOB_DIR)
|
|
+ suffix = "/";
|
|
+ else
|
|
+ suffix = "";
|
|
+
|
|
+ plen = strlen(prefix);
|
|
+ nlen = strlen(name);
|
|
+
|
|
+ result = mmalloc(plen + nlen + strlen(suffix) + /* NUL */ 1);
|
|
+
|
|
+ /* Add prefix */
|
|
+ strcpy(result, prefix);
|
|
+
|
|
+ /* Add name */
|
|
+ end = result + plen;
|
|
+ strcpy(end, name);
|
|
+ if (type == JOB_CMD)
|
|
+ escape_filename(end);
|
|
+
|
|
+ /* Add suffix */
|
|
+ end = end + nlen;
|
|
+ strcpy(end, suffix);
|
|
+
|
|
+ remove_double_slashes(result);
|
|
+
|
|
+ *result_ptr = result;
|
|
+}
|
|
+
|
|
+static void sanitize_dirname(char **name_ptr)
|
|
+{
|
|
+ char *name;
|
|
+
|
|
+ name = mmalloc(strlen(*name_ptr) + /* Slash */ 1 + /* NUL */ 1);
|
|
+ strcpy(name, *name_ptr);
|
|
+ remove_double_slashes(name);
|
|
+ chomp(name, "/");
|
|
+ strcat(name, "/");
|
|
+ free(*name_ptr);
|
|
+ *name_ptr = name;
|
|
+}
|
|
+
|
|
+/* Allocate and initialize a new job representation to add an entry according
|
|
+ * to the specified parameters. @relname and @dref are used for opening files
|
|
+ * more efficiently using *at() functions if specified. @is_cmd specifies if
|
|
+ * the specified inname is a command line. */
|
|
+static struct job *create_job(struct task *task, const char *inname,
|
|
+ const char *outname, bool is_cmd,
|
|
+ const char *relname, struct dref *dref,
|
|
+ struct stats *stats)
|
|
+{
|
|
+ struct job *job = mmalloc(sizeof(struct job));
|
|
+ int rc;
|
|
+
|
|
+ DBG("create job inname=%s outname=%s is_cmd=%d relname=%s dref=%p",
|
|
+ inname, outname, is_cmd, relname, dref);
|
|
+
|
|
+ job->status = JOB_QUEUED;
|
|
+
|
|
+ if (!inname) {
|
|
+ job->type = JOB_INIT;
|
|
+ return job;
|
|
+ }
|
|
+
|
|
+ job->inname = mstrdup(inname);
|
|
+
|
|
+ if (is_cmd) {
|
|
+ /* Special case - read from command output */
|
|
+ job->type = JOB_CMD;
|
|
+ set_dummy_stat(&job->stat);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ if (!relname && strcmp(job->inname, "-") == 0) {
|
|
+ /* Special case - read from standard input */
|
|
+ job->type = JOB_FILE;
|
|
+ set_dummy_stat(&job->stat);
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ rc = stat_file(task->opts->dereference, job->inname, relname, dref,
|
|
+ &job->stat);
|
|
+
|
|
+ if (rc < 0) {
|
|
+ read_error(task, job->inname, "Cannot stat file");
|
|
+ free_job(task, job);
|
|
+ stats->num_failed++;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (is_type_excluded(task->opts, job->stat.st_mode)) {
|
|
+ free_job(task, job);
|
|
+ stats->num_excluded++;
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (S_ISLNK(job->stat.st_mode)) {
|
|
+ job->type = JOB_LINK;
|
|
+ } else if (S_ISDIR(job->stat.st_mode)) {
|
|
+ job->type = JOB_DIR;
|
|
+ sanitize_dirname(&job->inname);
|
|
+
|
|
+ /* No need to keep parent directory open */
|
|
+ relname = NULL;
|
|
+ dref = NULL;
|
|
+ } else {
|
|
+ job->type = JOB_FILE;
|
|
+ }
|
|
+
|
|
+ if (relname)
|
|
+ job->relname = mstrdup(relname);
|
|
+ job->dref = dref_get(dref);
|
|
+
|
|
+out:
|
|
+ set_outname(&job->outname, outname, inname, job->type);
|
|
+
|
|
+ return job;
|
|
+}
|
|
+
|
|
+void job_print(struct job *job)
|
|
+{
|
|
+ printf("DEBUG: job_print at %p\n", job);
|
|
+ printf("DEBUG: next_job=%p\n", job->next_job);
|
|
+ printf("DEBUG: type=%d\n", job->type);
|
|
+ printf("DEBUG: status==%d\n", job->status);
|
|
+ printf("DEBUG: outname=%s\n", job->outname);
|
|
+ printf("DEBUG: inname=%s\n", job->inname);
|
|
+ printf("DEBUG: relname=%s\n", job->relname);
|
|
+ printf("DEBUG: timed=%d\n", job->timed);
|
|
+ printf("DEBUG: dref=%p\n", job->dref);
|
|
+ printf("DEBUG: cmd_status=%d\n", job->cmd_status);
|
|
+ printf("DEBUG: content=%p\n", job->content);
|
|
+}
|
|
+
|
|
+/* Return the number of bytes written to the output file */
|
|
+static size_t get_output_size(struct task *task)
|
|
+{
|
|
+#ifdef HAVE_ZLIB
|
|
+ if (task->opts->gzip) {
|
|
+ gzflush(task->output_gzfd, Z_SYNC_FLUSH);
|
|
+ return gztell(task->output_gzfd);
|
|
+ }
|
|
+#endif /* HAVE_ZLIB */
|
|
+ return task->output_written;
|
|
+}
|
|
+
|
|
+/* Write @len bytes at address @ptr to the output file */
|
|
+static int write_output(struct task *task, const char *ptr, size_t len)
|
|
+{
|
|
+ size_t todo = len;
|
|
+ ssize_t w;
|
|
+
|
|
+#ifdef HAVE_ZLIB
|
|
+ if (task->opts->gzip) {
|
|
+ if (gzwrite(task->output_gzfd, ptr, len) == 0)
|
|
+ goto err_write;
|
|
+ task->output_written += len;
|
|
+
|
|
+ return EXIT_OK;
|
|
+ }
|
|
+#endif /* HAVE_ZLIB */
|
|
+
|
|
+ while (todo > 0) {
|
|
+ w = write(task->output_fd, ptr, todo);
|
|
+ if (w < 0)
|
|
+ goto err_write;
|
|
+ todo -= w;
|
|
+ ptr += w;
|
|
+ }
|
|
+ task->output_written += len;
|
|
+
|
|
+ return EXIT_OK;
|
|
+
|
|
+err_write:
|
|
+ write_error(task, "Cannot write output");
|
|
+
|
|
+ return EXIT_RUNTIME;
|
|
+}
|
|
+
|
|
+/* Write an end-of-file marker to the output file */
|
|
+static void write_eof(struct task *task)
|
|
+{
|
|
+ char zeroes[TAR_BLOCKSIZE];
|
|
+
|
|
+ memset(zeroes, 0, sizeof(zeroes));
|
|
+ write_output(task, zeroes, TAR_BLOCKSIZE);
|
|
+ write_output(task, zeroes, TAR_BLOCKSIZE);
|
|
+}
|
|
+
|
|
+/* Callback for writing out chunks of job data */
|
|
+static int _write_job_data_cb(void *data, void *addr, size_t len)
|
|
+{
|
|
+ struct task *task = data;
|
|
+
|
|
+ return write_output(task, addr, len);
|
|
+}
|
|
+
|
|
+/* Write tar entry for a file containing the exit status of the process that
|
|
+ * ran command job @job */
|
|
+static int write_job_status_file(struct task *task, struct job *job)
|
|
+{
|
|
+ char *name, *content;
|
|
+ size_t len;
|
|
+ struct stat st;
|
|
+ int rc, status = job->cmd_status, exitstatus = -1, termsig = -1,
|
|
+ waitpid_errno = -1;
|
|
+
|
|
+ name = masprintf("%s.cmdstatus", job->outname);
|
|
+ if (status < 0)
|
|
+ waitpid_errno = -status;
|
|
+ else if (WIFEXITED(status))
|
|
+ exitstatus = WEXITSTATUS(status);
|
|
+ else if (WIFSIGNALED(status))
|
|
+ termsig = WTERMSIG(status);
|
|
+
|
|
+ content = masprintf("EXITSTATUS=%d\n"
|
|
+ "TERMSIG=%d\n"
|
|
+ "WAITPID_ERRNO=%d\n", exitstatus, termsig,
|
|
+ waitpid_errno);
|
|
+
|
|
+ len = strlen(content);
|
|
+ set_dummy_stat(&st);
|
|
+ rc = tar_emit_file_from_data(name, NULL, len, &st, TYPE_REGULAR,
|
|
+ content, _write_job_data_cb, task);
|
|
+ free(name);
|
|
+ free(content);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Write tar entry for data in @job to output. Must be called with output_lock
|
|
+ * held. */
|
|
+static void _write_job_data(struct task *task, struct job *job)
|
|
+{
|
|
+ struct buffer *buffer = job->content;
|
|
+
|
|
+ switch (job->status) {
|
|
+ case JOB_DONE:
|
|
+ case JOB_PARTIAL:
|
|
+ break;
|
|
+ case JOB_FAILED:
|
|
+ /* Create empty entries for failed reads */
|
|
+ if (task->opts->ignore_failed_read)
|
|
+ break;
|
|
+ return;
|
|
+ default:
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ switch (job->type) {
|
|
+ case JOB_CMD:
|
|
+ tar_emit_file_from_buffer(job->outname, NULL, buffer->total,
|
|
+ &job->stat, TYPE_REGULAR, buffer,
|
|
+ _write_job_data_cb, task);
|
|
+ task->output_num_files++;
|
|
+ if (task->opts->add_cmd_status) {
|
|
+ write_job_status_file(task, job);
|
|
+ task->output_num_files++;
|
|
+ }
|
|
+ break;
|
|
+ case JOB_FILE:
|
|
+ tar_emit_file_from_buffer(job->outname, NULL, buffer->total,
|
|
+ &job->stat, TYPE_REGULAR, buffer,
|
|
+ _write_job_data_cb, task);
|
|
+ task->output_num_files++;
|
|
+ break;
|
|
+ case JOB_LINK:
|
|
+ tar_emit_file_from_buffer(job->outname, buffer->addr, 0,
|
|
+ &job->stat, TYPE_LINK, NULL,
|
|
+ _write_job_data_cb, task);
|
|
+ task->output_num_files++;
|
|
+ break;
|
|
+ case JOB_DIR:
|
|
+ tar_emit_file_from_buffer(job->outname, NULL, 0, &job->stat,
|
|
+ TYPE_DIR, NULL, _write_job_data_cb,
|
|
+ task);
|
|
+ task->output_num_files++;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (task->opts->max_size > 0 &&
|
|
+ get_output_size(task) > task->opts->max_size) {
|
|
+ mwarnx("Archive size exceeds maximum of %ld bytes - aborting",
|
|
+ task->opts->max_size);
|
|
+ SET_ABORTED(task);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Read the contents of the symbolic link at @filename. On success, the
|
|
+ * contents is returned in @buffer and the return value is %EXIT_OK.
|
|
+ * If @relname is non-null it points to the name of the file relative
|
|
+ * to its parent directory for which @dirfd is an open file handle. */
|
|
+static int read_symlink(struct task *task, const char *filename,
|
|
+ const char *relname, int dirfd, struct buffer *buffer)
|
|
+{
|
|
+ ssize_t actual = 0;
|
|
+ size_t currlen = buffer->size ? buffer->size :
|
|
+ task->opts->read_chunk_size;
|
|
+ int rc = EXIT_OK;
|
|
+
|
|
+ while (!is_aborted(task)) {
|
|
+ buffer_make_room(buffer, currlen, false,
|
|
+ task->opts->max_buffer_size);
|
|
+
|
|
+ cancel_enable();
|
|
+ if (relname)
|
|
+ actual = readlinkat(dirfd, relname, buffer->addr,
|
|
+ buffer->size);
|
|
+ else
|
|
+ actual = readlink(filename, buffer->addr, buffer->size);
|
|
+ cancel_disable();
|
|
+
|
|
+ if (actual == -1) {
|
|
+ read_error(task, filename, "Cannot read link");
|
|
+ rc = EXIT_RUNTIME;
|
|
+ /* Reset actual counter to get an empty buffer */
|
|
+ actual = 0;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Ensure that content doesn't exceed --file-max-size limit */
|
|
+ if (task->opts->file_max_size > 0 &&
|
|
+ (size_t) actual > task->opts->file_max_size) {
|
|
+ actual = task->opts->file_max_size;/* Don't count NUL */
|
|
+ mwarnx("%s: Warning: Data exceeds maximum size of %ld "
|
|
+ "bytes - truncating", filename,
|
|
+ task->opts->file_max_size);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if ((size_t) actual < buffer->size)
|
|
+ break;
|
|
+
|
|
+ currlen += task->opts->read_chunk_size;
|
|
+ }
|
|
+
|
|
+ if (rc == EXIT_OK && is_aborted(task))
|
|
+ rc = EXIT_RUNTIME;
|
|
+
|
|
+ buffer->addr[actual] = 0;
|
|
+ buffer->total = actual + 1;
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Read data from the file descriptor @fd until an end-of-file condition is
|
|
+ * encountered. On success, @buffer contains the read data
|
|
+ * and the return value is %EXIT_OK. */
|
|
+static int read_fd(struct task *task, const char *name, int fd,
|
|
+ struct buffer *buffer)
|
|
+{
|
|
+ ssize_t rc = 0;
|
|
+ size_t c = buffer->size ? buffer->size : task->opts->read_chunk_size;
|
|
+
|
|
+ while (!is_aborted(task)) {
|
|
+ cancel_enable();
|
|
+ rc = buffer_read_fd(buffer, fd, c, true,
|
|
+ task->opts->max_buffer_size);
|
|
+ cancel_disable();
|
|
+
|
|
+ if (rc <= 0)
|
|
+ break;
|
|
+
|
|
+ /* Ensure that content doesn't exceed --file-max-size limit */
|
|
+ if (task->opts->file_max_size > 0 &&
|
|
+ buffer->total >= task->opts->file_max_size) {
|
|
+ buffer_truncate(buffer, task->opts->file_max_size);
|
|
+ rc = 0;
|
|
+ mwarnx("%s: Warning: Data exceeds maximum size of %ld "
|
|
+ "bytes - truncating", name,
|
|
+ task->opts->file_max_size);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ c = buffer->size - buffer->off;
|
|
+ if (c > 0) {
|
|
+ /* Read to memory */
|
|
+ } else if (buffer->size + task->opts->read_chunk_size <
|
|
+ task->opts->max_buffer_size) {
|
|
+ /* Enlarge memory buffer */
|
|
+ c = task->opts->read_chunk_size;
|
|
+ } else {
|
|
+ /* Use full memory buffer size */
|
|
+ c = task->opts->max_buffer_size;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (is_aborted(task) || rc != 0)
|
|
+ return EXIT_RUNTIME;
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
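
read_fd() keeps calling buffer_read_fd() with a growing chunk size until a zero-length read signals end-of-file, which is what makes files with an unreliable st_size (sysfs, procfs, FIFOs) safe to archive. A self-contained sketch of the same pattern using only plain read()/realloc() follows; it is illustration only, not part of the patch, and slurp() is a made-up helper without the patch's cancellation points or size limits.

/* Illustration only: read a file of unknown size by growing a heap
 * buffer until read() reports end-of-file. */
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

#define CHUNK 16384

static char *slurp(const char *path, size_t *len)
{
	int fd = open(path, O_RDONLY);
	char *buf = NULL;
	size_t size = 0, used = 0;
	ssize_t n = 0;

	if (fd < 0)
		return NULL;
	do {
		if (used == size) {
			char *tmp = realloc(buf, size + CHUNK);

			if (!tmp) {
				n = -1;
				break;
			}
			buf = tmp;
			size += CHUNK;
		}
		n = read(fd, buf + used, size - used);
		if (n > 0)
			used += n;
	} while (n > 0);
	close(fd);
	if (n < 0) {
		free(buf);
		return NULL;
	}
	*len = used;
	return buf;
}
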
+
|
|
+/* Read data from the file at @filename until an end-of-file condition is
|
|
+ * encountered. On success, @buffer contains the data read and the return
|
|
+ * value is %EXIT_OK. If @relname is non-null it points to the name of the
|
|
+ * file relative to its parent directory for which @dirfd is an open file
|
|
+ * handle. */
|
|
+static int read_regular(struct task *task, const char *filename,
|
|
+ const char *relname, int dirfd, struct buffer *buffer)
|
|
+{
|
|
+ int fd, rc = EXIT_OK;
|
|
+ bool need_close = true;
|
|
+
|
|
+ /* Opening a named pipe can block when peer is not ready */
|
|
+ cancel_enable();
|
|
+ if (strcmp(filename, "-") == 0) {
|
|
+ fd = STDIN_FILENO;
|
|
+ need_close = false;
|
|
+ filename = "Standard input";
|
|
+ } else if (relname)
|
|
+ fd = openat(dirfd, relname, O_RDONLY);
|
|
+ else
|
|
+ fd = open(filename, O_RDONLY);
|
|
+ cancel_disable();
|
|
+
|
|
+ if (fd < 0) {
|
|
+ read_error(task, filename, "Cannot open file");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+
|
|
+ rc = read_fd(task, filename, fd, buffer);
|
|
+ if (rc) {
|
|
+ if (is_aborted(task))
|
|
+ mwarnx("%s: Read aborted", filename);
|
|
+ else
|
|
+ read_error(task, filename, "Cannot read file");
|
|
+ }
|
|
+
|
|
+ if (need_close)
|
|
+ close(fd);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Read the output of command @cmd until an end-of-file condition is
|
|
+ * encountered. On success, @buffer contains the output and the return value
|
|
+ * is %EXIT_OK. When not %NULL, use @status_ptr to store the resulting process
|
|
+ * status. */
|
|
+static int read_cmd_output(struct task *task, char *cmd, struct buffer *buffer,
|
|
+ int *status_ptr)
|
|
+{
|
|
+ int fd, rc = EXIT_RUNTIME;
|
|
+ pid_t pid;
|
|
+
|
|
+ fd = cmd_open(cmd, &pid);
|
|
+ if (fd < 0) {
|
|
+ read_error(task, cmd, "Cannot run command");
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ if (read_fd(task, cmd, fd, buffer)) {
|
|
+ if (is_aborted(task))
|
|
+ mwarnx("%s: Command aborted", cmd);
|
|
+ else
|
|
+ read_error(task, cmd, "Cannot read command output");
|
|
+ } else
|
|
+ rc = EXIT_OK;
|
|
+
|
|
+ cmd_close(fd, pid, status_ptr);
|
|
+
|
|
+ return rc;
|
|
+
|
|
+}
|
|
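
read_cmd_output() delegates the fork/exec/pipe handling to the cmd_open()/cmd_close() helpers and reuses read_fd() for the data transfer. The same effect can be sketched with the standard popen()/pclose() pair; the sketch below is a simplification (no abort handling, no cancellation points) and capture() is a made-up helper, not the patch's code. pclose() reports the exit status in the same format as waitpid().

/* Illustration only: capture a command's output and exit status via
 * popen()/pclose() instead of the patch's cmd_open()/cmd_close() helpers. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *capture(const char *cmd, int *status)
{
	FILE *fp = popen(cmd, "r");
	char chunk[4096];
	char *out = NULL, *tmp;
	size_t used = 0, n;

	if (!fp)
		return NULL;
	while ((n = fread(chunk, 1, sizeof(chunk), fp)) > 0) {
		tmp = realloc(out, used + n + 1);
		if (!tmp)
			break;
		out = tmp;
		memcpy(out + used, chunk, n);
		used += n;
	}
	if (out)
		out[used] = 0;
	*status = pclose(fp);	/* waitpid()-style status, or -1 on error */
	return out;
}
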
+
|
|
+/* Check the exclude patterns in @task->opts->exclude for a match of @filename.
|
|
+ * If found, return the matching pattern string, otherwise return %NULL. */
|
|
+static const char *get_exclude_match(struct task *task, const char *filename)
|
|
+{
|
|
+ unsigned int i;
|
|
+ int mode = FNM_PERIOD | FNM_NOESCAPE;
|
|
+
|
|
+ for (i = 0; i < task->opts->exclude.num; i++) {
|
|
+ if (fnmatch(task->opts->exclude.str[i], filename, mode) == 0)
|
|
+ return task->opts->exclude.str[i];
|
|
+ }
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
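
The FNM_PERIOD | FNM_NOESCAPE combination means that a leading dot in a file name must be matched explicitly and that backslashes in patterns are taken literally; since FNM_PATHNAME is not set, '*' also matches across '/' separators. A small illustration of that behavior follows (the paths are made up, not taken from the patch).

/* Illustration only: behavior of the fnmatch() flags used above. */
#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	int flags = FNM_PERIOD | FNM_NOESCAPE;

	/* Prints 0 (match): without FNM_PATHNAME, '*' also matches '/' */
	printf("%d\n", fnmatch("/proc/*/stat", "/proc/1/task/1/stat", flags));
	/* Prints FNM_NOMATCH: a leading '.' is not matched by '*' */
	printf("%d\n", fnmatch("*", ".hidden", flags));
	return 0;
}
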
+
|
|
+/* Add the specified @job to the start of the job queue */
|
|
+static void _queue_job_head(struct task *task, struct job *job)
|
|
+{
|
|
+ DBG("queue job type=%d inname=%s at head", job->type, job->inname);
|
|
+ job->next_job = task->jobs_head;
|
|
+ task->jobs_head = job;
|
|
+ if (!task->jobs_tail)
|
|
+ task->jobs_tail = job;
|
|
+}
|
|
+
|
|
+/* Add the specified @job to the end of the job queue */
|
|
+static void _queue_job_tail(struct task *task, struct job *job)
|
|
+{
|
|
+ DBG("queue job type=%d inname=%s at tail", job->type, job->inname);
|
|
+ if (task->jobs_tail)
|
|
+ task->jobs_tail->next_job = job;
|
|
+ else
|
|
+ task->jobs_head = job;
|
|
+ task->jobs_tail = job;
|
|
+}
|
|
+
|
|
+/* Add the specified @job to the job queue and trigger processing.
|
|
+ * If @head is %true, the new job is inserted at the start of the job queue,
|
|
+ * otherwise at the end. */
|
|
+static void queue_job(struct task *task, struct job *job, bool head)
|
|
+{
|
|
+ main_lock(task);
|
|
+ task->num_jobs_active++;
|
|
+ if (head)
|
|
+ _queue_job_head(task, job);
|
|
+ else
|
|
+ _queue_job_tail(task, job);
|
|
+ _worker_wakeup_one(task);
|
|
+ main_unlock(task);
|
|
+}
|
|
+
|
|
+/* Add the specified list of jobs starting with @first up to @last to the start
|
|
+ * of the job queue and trigger processing */
|
|
+static void queue_jobs(struct task *task, struct job *first, struct job *last,
|
|
+ int num)
|
|
+{
|
|
+ main_lock(task);
|
|
+ last->next_job = task->jobs_head;
|
|
+ task->jobs_head = first;
|
|
+ task->num_jobs_active += num;
|
|
+ _worker_wakeup_all(task);
|
|
+ main_unlock(task);
|
|
+}
|
|
+
|
|
+/* Remove the head of the job queue and return it to the caller */
|
|
+static struct job *_dequeue_job(struct task *task)
|
|
+{
|
|
+ struct job *job = NULL;
|
|
+
|
|
+ if (task->jobs_head) {
|
|
+ job = task->jobs_head;
|
|
+ task->jobs_head = job->next_job;
|
|
+ job->next_job = NULL;
|
|
+ if (job == task->jobs_tail)
|
|
+ task->jobs_tail = NULL;
|
|
+ DBG("dequeueing job type=%d inname=%s", job->type, job->inname);
|
|
+ job->status = JOB_IN_PROGRESS;
|
|
+ } else {
|
|
+ DBG("no job to dequeue");
|
|
+ }
|
|
+
|
|
+ return job;
|
|
+}
|
|
+
|
|
+/* Create and queue a job for the file at @inname */
|
|
+static void queue_file(struct task *task, const char *inname,
|
|
+ const char *outname, bool is_cmd,
|
|
+ const char *relname, struct dref *dref,
|
|
+ struct stats *stats, bool head)
|
|
+{
|
|
+ struct job *job;
|
|
+
|
|
+ job = create_job(task, inname, outname, is_cmd, relname, dref, stats);
|
|
+ if (job)
|
|
+ queue_job(task, job, head);
|
|
+}
|
|
+
|
|
+/* Queue initial job */
|
|
+static void init_queue(struct task *task)
|
|
+{
|
|
+ queue_file(task, NULL, NULL, false, NULL, NULL, NULL, true);
|
|
+}
|
|
+
|
|
+/* Create and queue jobs for all files found in @dirname */
|
|
+static void queue_dir(struct task *task, const char *dirname,
|
|
+ const char *outname, struct stats *stats)
|
|
+{
|
|
+ struct dirent *de;
|
|
+ char *inpath, *outpath;
|
|
+ struct dref *dref;
|
|
+ struct job *job, *first = NULL, *last = NULL;
|
|
+ int num = 0;
|
|
+
|
|
+ dref = dref_create(dirname);
|
|
+ if (!dref) {
|
|
+ read_error(task, dirname, "Cannot read directory");
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ while ((de = readdir(dref->dd))) {
|
|
+ if (de->d_name[0] == '.') {
|
|
+ if (de->d_name[1] == 0)
|
|
+ continue;
|
|
+ if (de->d_name[1] == '.' && de->d_name[2] == 0)
|
|
+ continue;
|
|
+ }
|
|
+ DBG("next file %s", de->d_name);
|
|
+ inpath = masprintf("%s%s", dirname, de->d_name);
|
|
+ outpath = masprintf("%s%s", outname, de->d_name);
|
|
+ job = create_job(task, inpath, outpath, false, de->d_name, dref,
|
|
+ stats);
|
|
+ if (job) {
|
|
+ if (last) {
|
|
+ last->next_job = job;
|
|
+ last = job;
|
|
+ } else {
|
|
+ first = job;
|
|
+ last = job;
|
|
+ }
|
|
+ num++;
|
|
+ }
|
|
+ free(inpath);
|
|
+ free(outpath);
|
|
+ }
|
|
+
|
|
+ if (first)
|
|
+ queue_jobs(task, first, last, num);
|
|
+
|
|
+ dref_put(dref);
|
|
+}
|
|
+
|
|
+/* Create and queue jobs for all files specified on the command line */
|
|
+static void queue_jobs_from_opts(struct task *task, struct stats *stats)
|
|
+{
|
|
+ struct dump_opts *opts = task->opts;
|
|
+ unsigned int i;
|
|
+
|
|
+ /* Queue directly specified entries */
|
|
+ for (i = 0; i < opts->num_specs && !is_aborted(task); i++) {
|
|
+ queue_file(task, opts->specs[i].inname, opts->specs[i].outname,
|
|
+ opts->specs[i].is_cmd, NULL, NULL, stats, false);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Prepare output stream */
|
|
+static int open_output(struct task *task)
|
|
+{
|
|
+ bool to_stdout = !task->opts->output_file ||
|
|
+ strcmp(task->opts->output_file, "-") == 0;
|
|
+ int rc = EXIT_OK;
|
|
+
|
|
+ if (to_stdout) {
|
|
+ set_stdout_data();
|
|
+ task->opts->output_file = "Standard output";
|
|
+ }
|
|
+
|
|
+ cancel_enable();
|
|
+#ifdef HAVE_ZLIB
|
|
+ if (task->opts->gzip) {
|
|
+ if (to_stdout) {
|
|
+ task->output_gzfd =
|
|
+ gzdopen(STDOUT_FILENO,
|
|
+ task->opts->append ? "ab" : "wb");
|
|
+ } else {
|
|
+ task->output_gzfd =
|
|
+ gzopen(task->opts->output_file,
|
|
+ task->opts->append ? "ab" : "wb");
|
|
+ }
|
|
+
|
|
+ if (!task->output_gzfd)
|
|
+ rc = EXIT_RUNTIME;
|
|
+ goto out;
|
|
+ }
|
|
+#endif /* HAVE_ZLIB */
|
|
+
|
|
+ if (to_stdout) {
|
|
+ task->output_fd = STDOUT_FILENO;
|
|
+ } else {
|
|
+ task->output_fd =
|
|
+ open(task->opts->output_file, O_WRONLY | O_CREAT |
|
|
+ (task->opts->append ? O_APPEND : 0), 0666);
|
|
+ }
|
|
+
|
|
+ if (task->output_fd < 0)
|
|
+ rc = EXIT_RUNTIME;
|
|
+ else if (!task->opts->append && ftruncate(task->output_fd, 0) == -1)
|
|
+ rc = EXIT_RUNTIME;
|
|
+
|
|
+#ifdef HAVE_ZLIB
|
|
+out:
|
|
+#endif /* HAVE_ZLIB */
|
|
+ cancel_disable();
|
|
+
|
|
+ if (rc != EXIT_OK) {
|
|
+ mwarn("%s: Cannot open output file", task->opts->output_file);
|
|
+ return rc;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Determine if the specified @job should be excluded from archiving */
|
|
+static bool is_job_excluded(struct task *task, struct job *job)
|
|
+{
|
|
+ const char *pat;
|
|
+
|
|
+ if (job->type == JOB_INIT || job->type == JOB_CMD)
|
|
+ return false;
|
|
+
|
|
+ pat = get_exclude_match(task, job->inname);
|
|
+ if (!pat)
|
|
+ return false;
|
|
+
|
|
+ tverb("Excluding '%s' due to exclude pattern '%s'\n", job->inname, pat);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* Perform all actions necessary to process @job and add resulting tar
|
|
+ * data buffers to the buffer list of @thread. */
|
|
+static void process_job(struct per_thread *thread, struct job *job)
|
|
+{
|
|
+ struct task *task = thread->task;
|
|
+ const char *relname = job->dref ? job->relname : NULL;
|
|
+ int dirfd = job->dref ? job->dref->dirfd : -1;
|
|
+ struct buffer *buffer = &thread->buffer;
|
|
+ enum job_status status = JOB_DONE;
|
|
+
|
|
+ DBG("processing job type=%d inname=%s", job->type, job->inname);
|
|
+
|
|
+ if (is_job_excluded(task, job)) {
|
|
+ status = JOB_EXCLUDED;
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ switch (job->type) {
|
|
+ case JOB_INIT: /* Perform initial setup steps */
|
|
+ if (open_output(task)) {
|
|
+ SET_ABORTED(task);
|
|
+ status = JOB_FAILED;
|
|
+ goto out;
|
|
+ }
|
|
+ queue_jobs_from_opts(task, &thread->stats);
|
|
+ break;
|
|
+ case JOB_CMD: /* Capture command output */
|
|
+ tverb("Dumping command output '%s'\n", job->inname);
|
|
+
|
|
+ set_dummy_stat(&job->stat);
|
|
+ if (read_cmd_output(task, job->inname, buffer,
|
|
+ &job->cmd_status))
|
|
+ status = JOB_FAILED;
|
|
+
|
|
+ break;
|
|
+ case JOB_LINK: /* Read symbolic link */
|
|
+ tverb("Dumping link '%s'\n", job->inname);
|
|
+
|
|
+ if (read_symlink(task, job->inname, relname, dirfd, buffer))
|
|
+ status = JOB_FAILED;
|
|
+
|
|
+ break;
|
|
+ case JOB_DIR: /* Read directory contents */
|
|
+ tverb("Dumping directory '%s'\n", job->inname);
|
|
+
|
|
+ if (task->opts->recursive) {
|
|
+ queue_dir(task, job->inname, job->outname,
|
|
+ &thread->stats);
|
|
+ }
|
|
+ break;
|
|
+ case JOB_FILE: /* Read file contents */
|
|
+ tverb("Dumping file '%s'\n", job->inname);
|
|
+
|
|
+ if (read_regular(task, job->inname, relname, dirfd, buffer))
|
|
+ status = JOB_FAILED;
|
|
+
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+out:
|
|
+ job->status = status;
|
|
+ DBG("processing done status=%d", job->status);
|
|
+}
|
|
+
|
|
+/* Add @job results to statistics @stats */
|
|
+static void account_stats(struct task *task, struct stats *stats,
|
|
+ struct job *job)
|
|
+{
|
|
+ DBG("accounting job %s", job->inname);
|
|
+
|
|
+ if (job->type == JOB_INIT)
|
|
+ return;
|
|
+
|
|
+ switch (job->status) {
|
|
+ case JOB_DONE:
|
|
+ stats->num_done++;
|
|
+ if (job->type == JOB_CMD && task->opts->add_cmd_status)
|
|
+ stats->num_done++;
|
|
+ break;
|
|
+ case JOB_PARTIAL:
|
|
+ stats->num_done++;
|
|
+ stats->num_partial++;
|
|
+ if (job->type == JOB_CMD && task->opts->add_cmd_status)
|
|
+ stats->num_done++;
|
|
+ break;
|
|
+ case JOB_FAILED:
|
|
+ stats->num_failed++;
|
|
+ break;
|
|
+ case JOB_EXCLUDED:
|
|
+ stats->num_excluded++;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Add statistics @from to @to */
|
|
+static void add_stats(struct stats *to, struct stats *from)
|
|
+{
|
|
+ to->num_done += from->num_done;
|
|
+ to->num_partial += from->num_partial;
|
|
+ to->num_excluded += from->num_excluded;
|
|
+ to->num_failed += from->num_failed;
|
|
+}
|
|
+
|
|
+/* Release resources allocated to @thread */
|
|
+static void cleanup_thread(struct per_thread *thread)
|
|
+{
|
|
+ if (thread->job)
|
|
+ free_job(thread->task, thread->job);
|
|
+ buffer_free(&thread->buffer, false);
|
|
+}
|
|
+
|
|
+/* Register active @job at @thread */
|
|
+static void start_thread_job(struct per_thread *thread, struct job *job)
|
|
+{
|
|
+ struct task *task = thread->task;
|
|
+
|
|
+ thread->job = job;
|
|
+ job->content = &thread->buffer;
|
|
+ if (task->opts->file_timeout > 0 && job->type != JOB_INIT) {
|
|
+ /* Set up per-job timeout */
|
|
+ set_timespec(&job->deadline, task->opts->file_timeout, 0);
|
|
+ job->timed = true;
|
|
+
|
|
+ /* Signal main thread to update deadline timeout */
|
|
+ _main_wakeup(task);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Unregister active @job at @thread */
|
|
+static void stop_thread_job(struct per_thread *thread, struct job *job)
|
|
+{
|
|
+ thread->job = NULL;
|
|
+ job->content = NULL;
|
|
+ buffer_reset(&thread->buffer);
|
|
+}
|
|
+
|
|
+/* Wait until a job is available in the job queue. When a job becomes
|
|
+ * available, dequeue and return it. Return %NULL if no more jobs are
|
|
+ * available, or if processing was aborted. Must be called with task->mutex
|
|
+ * locked. */
|
|
+static struct job *_get_next_job(struct task *task)
|
|
+{
|
|
+ struct job *job = NULL;
|
|
+
|
|
+ do {
|
|
+ DBG("checking for jobs");
|
|
+ if (task->aborted)
|
|
+ break;
|
|
+ job = _dequeue_job(task);
|
|
+ if (job)
|
|
+ break;
|
|
+ if (task->num_jobs_active == 0)
|
|
+ break;
|
|
+ DBG("found no jobs (%d active)", task->num_jobs_active);
|
|
+ } while (_worker_wait(task) == 0);
|
|
+
|
|
+ return job;
|
|
+}
|
|
+
|
|
+/* Unlock the mutex specified by @data */
|
|
+static void cleanup_unlock(void *data)
|
|
+{
|
|
+ pthread_mutex_t *mutex = data;
|
|
+
|
|
+ pthread_mutex_unlock(mutex);
|
|
+}
|
|
+
|
|
+/* Write entry for data in @job to output */
|
|
+static void write_job_data(struct task *task, struct job *job)
|
|
+{
|
|
+ DBG("write_job_data");
|
|
+ output_lock(task);
|
|
+ pthread_cleanup_push(cleanup_unlock, &task->output_mutex);
|
|
+ cancel_enable();
|
|
+
|
|
+ _write_job_data(task, job);
|
|
+
|
|
+ cancel_disable();
|
|
+ pthread_cleanup_pop(0);
|
|
+ output_unlock(task);
|
|
+}
|
|
+
|
|
+/* Perform second part of job processing for @job at @thread by writing the
|
|
+ * resulting tar file entry */
|
|
+static void postprocess_job(struct per_thread *thread, struct job *job,
|
|
+ bool cancelable)
|
|
+{
|
|
+ struct task *task = thread->task;
|
|
+
|
|
+ account_stats(task, &thread->stats, job);
|
|
+ if (cancelable)
|
|
+ write_job_data(task, job);
|
|
+ else
|
|
+ _write_job_data(task, job);
|
|
+}
|
|
+
|
|
+/* Mark @job as complete by releasing all associated resources. If this was
|
|
+ * the last active job, inform the main thread. Must be called with main_lock
|
|
+ * mutex held. */
|
|
+static void _complete_job(struct task *task, struct job *job)
|
|
+{
|
|
+ task->num_jobs_active--;
|
|
+ if (task->num_jobs_active == 0)
|
|
+ _main_wakeup(task);
|
|
+ free_job(task, job);
|
|
+}
|
|
+
|
|
+static void init_thread(struct per_thread *thread, struct task *task, long num)
|
|
+{
|
|
+ memset(thread, 0, sizeof(struct per_thread));
|
|
+ thread->task = task;
|
|
+ thread->num = num;
|
|
+}
|
|
+
|
|
+/* Dequeue and process all jobs on the job queue */
|
|
+static int process_queue(struct task *task)
|
|
+{
|
|
+ struct job *job;
|
|
+ struct per_thread thread;
|
|
+
|
|
+ init_thread(&thread, task, 0);
|
|
+
|
|
+ while ((job = _dequeue_job(task)) && !is_aborted(task)) {
|
|
+ start_thread_job(&thread, job);
|
|
+ process_job(&thread, job);
|
|
+ postprocess_job(&thread, job, false);
|
|
+ stop_thread_job(&thread, job);
|
|
+ _complete_job(task, job);
|
|
+ }
|
|
+
|
|
+ task->stats = thread.stats;
|
|
+ cleanup_thread(&thread);
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Return %true if @job is in a final state, %false otherwise */
|
|
+static bool job_is_final(struct job *job)
|
|
+{
|
|
+ switch (job->status) {
|
|
+ case JOB_DONE:
|
|
+ case JOB_PARTIAL:
|
|
+ case JOB_EXCLUDED:
|
|
+ case JOB_FAILED:
|
|
+ return true;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/* Worker thread function: process jobs on the job queue until all jobs
|
|
+ * are processed or processing was aborted. */
|
|
+static void *worker_thread_main(void *d)
|
|
+{
|
|
+ struct per_thread *thread = d;
|
|
+ struct task *task = thread->task;
|
|
+ struct job *job;
|
|
+
|
|
+ /* Allow cancel only at specific code points */
|
|
+ cancel_disable();
|
|
+ set_threadname("%*sworker %d", (thread->num + 1) * 2, "", thread->num);
|
|
+
|
|
+ /* Handle jobs left over from canceled thread */
|
|
+ job = thread->job;
|
|
+ if (job) {
|
|
+ DBG("handle aborted job %p", job);
|
|
+
|
|
+ postprocess_job(thread, job, true);
|
|
+
|
|
+ main_lock(task);
|
|
+ if (thread->timed_out)
|
|
+ goto out;
|
|
+ stop_thread_job(thread, job);
|
|
+ _complete_job(task, job);
|
|
+ main_unlock(task);
|
|
+ }
|
|
+
|
|
+ DBG("enter worker loop");
|
|
+
|
|
+ main_lock(task);
|
|
+ while ((job = _get_next_job(task))) {
|
|
+ start_thread_job(thread, job);
|
|
+ main_unlock(task);
|
|
+
|
|
+ process_job(thread, job);
|
|
+ postprocess_job(thread, job, true);
|
|
+
|
|
+ main_lock(task);
|
|
+ if (thread->timed_out)
|
|
+ goto out;
|
|
+ stop_thread_job(thread, job);
|
|
+ _complete_job(task, job);
|
|
+ }
|
|
+
|
|
+out:
|
|
+ thread->running = false;
|
|
+ _main_wakeup(task);
|
|
+ main_unlock(task);
|
|
+
|
|
+ cancel_enable();
|
|
+ DBG("leave work loop");
|
|
+
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+/* Start a worker thread associated with the specified @data. Return %EXIT_OK on
|
|
+ * success. */
|
|
+static int start_worker_thread(struct per_thread *data)
|
|
+{
|
|
+ int rc;
|
|
+
|
|
+ DBG("start thread");
|
|
+ global_threaded = true;
|
|
+ data->timed_out = false;
|
|
+ rc = pthread_create(&data->thread, NULL, &worker_thread_main, data);
|
|
+ if (rc) {
|
|
+ mwarnx("Cannot start thread: %s", strerror(rc));
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+ data->running = true;
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Perform timeout handling for thread associated with @data by canceling and
|
|
+ * restarting the corresponding thread. Must be called with task->mutex
|
|
+ * held. */
|
|
+static void _timeout_thread(struct per_thread *data)
|
|
+{
|
|
+ struct task *task = data->task;
|
|
+ struct job *job = data->job;
|
|
+ pthread_t thread = data->thread;
|
|
+ const char *op, *action;
|
|
+
|
|
+ if (!job) {
|
|
+ /* Timeout raced with job completion */
|
|
+ return;
|
|
+ }
|
|
+ if (job_is_final(job)) {
|
|
+ /* Job processing done, timeout does not apply */
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ data->timed_out = true;
|
|
+
|
|
+ /* Allow thread to obtain main lock during cancel handling */
|
|
+ main_unlock(task);
|
|
+ DBG("cancel num=%d thread=%p", data->num, thread);
|
|
+ pthread_cancel(thread);
|
|
+ DBG("join num=%d thread=%p", data->num, thread);
|
|
+
|
|
+ pthread_join(thread, NULL);
|
|
+ main_lock(task);
|
|
+
|
|
+ DBG("join done");
|
|
+
|
|
+ if (job->type == JOB_CMD)
|
|
+ op = "Command";
|
|
+ else
|
|
+ op = "Read";
|
|
+
|
|
+ if (task->opts->ignore_failed_read)
|
|
+ action = "skipping";
|
|
+ else
|
|
+ action = "aborting";
|
|
+
|
|
+ if (!job->inname || !*job->inname)
|
|
+ job_print(job);
|
|
+ mwarnx("%s: %s%s timed out after %d second%s - %s", job->inname,
|
|
+ task->opts->ignore_failed_read ? "Warning: " : "", op,
|
|
+ task->opts->file_timeout,
|
|
+ task->opts->file_timeout > 1 ? "s" : "", action);
|
|
+ if (!task->opts->ignore_failed_read)
|
|
+ _SET_ABORTED(task);
|
|
+
|
|
+ /* Interrupted job will be handled by new thread - adjust status */
|
|
+ if (job->status == JOB_IN_PROGRESS)
|
|
+ job->status = JOB_PARTIAL;
|
|
+ else if (!job_is_final(job))
|
|
+ job->status = JOB_FAILED;
|
|
+
|
|
+ if (start_worker_thread(data))
|
|
+ _SET_ABORTED(task);
|
|
+}
|
|
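
The per-file timeout works by letting the main thread cancel a worker that is stuck in a blocking open()/read()/readlink() call (those calls are bracketed by cancel_enable()/cancel_disable(), so they are the only places a cancel takes effect) and then starting a replacement worker; data read so far is still written out as a JOB_PARTIAL entry. The following stripped-down sketch shows only the cancel-on-deadline idea; EXAMPLE_FIFO is a hypothetical path (a FIFO with no writer blocks in open()), and thread cleanup handlers are omitted for brevity.

/* Illustration only: cancel a thread that is stuck in a blocking read. */
#include <fcntl.h>
#include <pthread.h>
#include <unistd.h>

#define EXAMPLE_FIFO "/tmp/example-fifo"	/* hypothetical */

static void *reader(void *arg)
{
	char buf[4096];
	int fd;

	(void)arg;
	fd = open(EXAMPLE_FIFO, O_RDONLY);	/* cancellation point */
	if (fd < 0)
		return NULL;
	while (read(fd, buf, sizeof(buf)) > 0)	/* cancellation point */
		;
	close(fd);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, reader, NULL);
	sleep(5);		/* stand-in for the deadline bookkeeping */
	pthread_cancel(tid);	/* wakes the thread out of open()/read() */
	pthread_join(tid, NULL);
	return 0;
}
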
+
|
|
+/* Return the number of currently running jobs */
|
|
+static long num_jobs_running(struct task *task, struct per_thread *threads)
|
|
+{
|
|
+ long i, num = 0;
|
|
+
|
|
+ for (i = 0; i < task->opts->jobs; i++) {
|
|
+ if (threads[i].running)
|
|
+ num++;
|
|
+ }
|
|
+
|
|
+ return num;
|
|
+}
|
|
+
|
|
+/* Wait until all jobs are done or timeout occurs */
|
|
+static int wait_for_completion(struct task *task, struct per_thread *threads)
|
|
+{
|
|
+ int rc = 0, earliest_timeout;
|
|
+ long i;
|
|
+ struct per_thread *earliest_thread;
|
|
+ struct timespec tool_deadline_ts, deadline_ts, *earliest_ts;
|
|
+ struct job *job;
|
|
+
|
|
+ /* Set tool deadline */
|
|
+ tool_deadline_ts = task->start_ts;
|
|
+ inc_timespec(&tool_deadline_ts, task->opts->timeout, 0);
|
|
+
|
|
+ main_lock(task);
|
|
+ while (!task->aborted && task->num_jobs_active > 0) {
|
|
+ /* Calculate nearest timeout */
|
|
+ earliest_timeout = 0;
|
|
+ earliest_ts = NULL;
|
|
+ earliest_thread = NULL;
|
|
+
|
|
+ if (task->opts->timeout > 0) {
|
|
+ earliest_timeout = task->opts->timeout;
|
|
+ earliest_ts = &tool_deadline_ts;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < task->opts->jobs; i++) {
|
|
+ job = threads[i].job;
|
|
+ if (!job || !job->timed)
|
|
+ continue;
|
|
+ if (task->opts->file_timeout == 0)
|
|
+ continue;
|
|
+ if (!earliest_ts ||
|
|
+ ts_before(&job->deadline, earliest_ts)) {
|
|
+ earliest_timeout = task->opts->file_timeout;
|
|
+ earliest_ts = &job->deadline;
|
|
+ earliest_thread = &threads[i];
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Wait for status change or timeout */
|
|
+ if (earliest_ts) {
|
|
+ deadline_ts = *earliest_ts;
|
|
+ rc = _main_wait_timed(task, &deadline_ts);
|
|
+ } else {
|
|
+ rc = _main_wait(task);
|
|
+ }
|
|
+
|
|
+ if (rc == 0)
|
|
+ continue;
|
|
+ if (rc != ETIMEDOUT) {
|
|
+ mwarnx("Cannot wait for status change: %s",
|
|
+ strerror(rc));
|
|
+ _SET_ABORTED(task);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* Timeout handling */
|
|
+ if (earliest_thread) {
|
|
+ /* Per-file timeout, restart */
|
|
+ _timeout_thread(earliest_thread);
|
|
+ rc = 0;
|
|
+ } else {
|
|
+ /* Global timeout, abort */
|
|
+ mwarnx("Operation timed out after %d second%s - "
|
|
+ "aborting", earliest_timeout,
|
|
+ earliest_timeout > 1 ? "s" : "");
|
|
+ _SET_ABORTED(task);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (task->aborted)
|
|
+ DBG("aborted");
|
|
+ else
|
|
+ DBG("all work done");
|
|
+ _worker_wakeup_all(task);
|
|
+
|
|
+ /* Allow jobs to finish */
|
|
+ set_timespec(&deadline_ts, 0, NSEC_PER_SEC / 4);
|
|
+ while (!task->aborted && num_jobs_running(task, threads) > 0) {
|
|
+ DBG("waiting for %lu processes",
|
|
+ num_jobs_running(task, threads));
|
|
+
|
|
+ if (_main_wait_timed(task, &deadline_ts))
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ main_unlock(task);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Finalize output stream */
|
|
+static void close_output(struct task *task)
|
|
+{
|
|
+#ifdef HAVE_ZLIB
|
|
+ if (task->opts->gzip) {
|
|
+ gzclose(task->output_gzfd);
|
|
+ return;
|
|
+ }
|
|
+#endif /* HAVE_ZLIB */
|
|
+
|
|
+ if (task->output_fd != STDOUT_FILENO)
|
|
+ close(task->output_fd);
|
|
+}
|
|
+
|
|
+/* Start multi-threaded processing of job queue */
|
|
+static int process_queue_threaded(struct task *task)
|
|
+{
|
|
+ struct per_thread *threads, *thread;
|
|
+ int rc;
|
|
+ long i;
|
|
+
|
|
+ tverb("Using %ld threads\n", task->opts->jobs);
|
|
+ threads = mcalloc(sizeof(struct per_thread), task->opts->jobs);
|
|
+
|
|
+ rc = 0;
|
|
+ for (i = 0; i < task->opts->jobs; i++) {
|
|
+ init_thread(&threads[i], task, i);
|
|
+ rc = start_worker_thread(&threads[i]);
|
|
+ if (rc)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (!rc)
|
|
+ wait_for_completion(task, threads);
|
|
+
|
|
+ DBG("thread cleanup");
|
|
+ for (i = 0; i < task->opts->jobs; i++) {
|
|
+ thread = &threads[i];
|
|
+ if (thread->running) {
|
|
+ DBG("cancel %p", thread->thread);
|
|
+ pthread_cancel(thread->thread);
|
|
+ }
|
|
+ DBG("join %p", thread->thread);
|
|
+ pthread_join(thread->thread, NULL);
|
|
+ add_stats(&task->stats, &thread->stats);
|
|
+ cleanup_thread(thread);
|
|
+ }
|
|
+
|
|
+ free(threads);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+/* Abort any remaining queued jobs and account them in the task statistics */
|
|
+static void abort_queued_jobs(struct task *task)
|
|
+{
|
|
+ struct job *job;
|
|
+
|
|
+ while ((job = _dequeue_job(task))) {
|
|
+ DBG("aborting job %s", job->inname);
|
|
+ task->stats.num_failed++;
|
|
+ job->status = JOB_FAILED;
|
|
+ _complete_job(task, job);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Print a summary line */
|
|
+static void print_summary(struct task *task)
|
|
+{
|
|
+ char msg[MSG_LEN];
|
|
+ size_t off = 0;
|
|
+ int rc;
|
|
+ struct stats *stats = &task->stats;
|
|
+ struct timespec end_ts;
|
|
+ int num_special;
|
|
+ unsigned long num_added;
|
|
+
|
|
+ if (task->opts->quiet)
|
|
+ return;
|
|
+ set_timespec(&end_ts, 0, 0);
|
|
+
|
|
+ num_special = 0;
|
|
+ num_special += stats->num_partial > 0 ? 1 : 0;
|
|
+ num_special += stats->num_excluded > 0 ? 1 : 0;
|
|
+ num_special += stats->num_failed > 0 ? 1 : 0;
|
|
+
|
|
+ num_added = stats->num_done;
|
|
+ if (task->opts->ignore_failed_read)
|
|
+ num_added += stats->num_partial + stats->num_failed;
|
|
+
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "Dumped %lu entries ",
|
|
+ num_added);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ if (num_special > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "(");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ if (stats->num_partial > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "%lu partial",
|
|
+ stats->num_partial);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ if (--num_special > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, ", ");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ }
|
|
+ }
|
|
+ if (stats->num_excluded > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "%lu excluded",
|
|
+ stats->num_excluded);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ if (--num_special > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, ", ");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ }
|
|
+ }
|
|
+ if (stats->num_failed > 0) {
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "%lu failed",
|
|
+ stats->num_failed);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ }
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, ") ");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ }
|
|
+
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "in ");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ snprintf_duration(&msg[off], MSG_LEN - off, &task->start_ts, &end_ts);
|
|
+
|
|
+out:
|
|
+ info("%s\n", msg);
|
|
+}
|
|
+
|
|
+static int init_task(struct task *task, struct dump_opts *opts)
|
|
+{
|
|
+ pthread_condattr_t attr;
|
|
+
|
|
+ memset(task, 0, sizeof(struct task));
|
|
+ set_timespec(&task->start_ts, 0, 0);
|
|
+ task->opts = opts;
|
|
+ pthread_mutex_init(&task->mutex, NULL);
|
|
+ pthread_mutex_init(&task->output_mutex, NULL);
|
|
+ pthread_cond_init(&task->worker_cond, NULL);
|
|
+
|
|
+ pthread_condattr_init(&attr);
|
|
+ if (pthread_condattr_setclock(&attr, CLOCK_MONOTONIC) ||
|
|
+ pthread_cond_init(&task->cond, &attr)) {
|
|
+ mwarn("Could not adjust pthread clock");
|
|
+ return EXIT_RUNTIME;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+struct dump_opts *dump_opts_new(void)
|
|
+{
|
|
+ struct dump_opts *opts = mmalloc(sizeof(struct dump_opts));
|
|
+
|
|
+ opts->recursive = true;
|
|
+ opts->read_chunk_size = DEFAULT_READ_CHUNK_SIZE;
|
|
+ opts->max_buffer_size = DEFAULT_MAX_BUFFER_SIZE;
|
|
+
|
|
+ return opts;
|
|
+}
|
|
+
|
|
+void dump_opts_free(struct dump_opts *opts)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ if (!opts)
|
|
+ return;
|
|
+
|
|
+ free_strarray(&opts->exclude);
|
|
+ for (i = 0; i < opts->num_specs; i++) {
|
|
+ free(opts->specs[i].inname);
|
|
+ free(opts->specs[i].outname);
|
|
+ }
|
|
+ free(opts->specs);
|
|
+ free(opts);
|
|
+}
|
|
+
|
|
+void dump_opts_print(struct dump_opts *opts)
|
|
+{
|
|
+ unsigned int i;
|
|
+
|
|
+ printf("DEBUG: dump_opts at %p\n", opts);
|
|
+ if (!opts)
|
|
+ return;
|
|
+ printf("DEBUG: add_cmd_status=%d\n", opts->add_cmd_status);
|
|
+ printf("DEBUG: append=%d\n", opts->append);
|
|
+ printf("DEBUG: dereference=%d\n", opts->dereference);
|
|
+ for (i = 0; i < NUM_EXCLUDE_TYPES; i++)
|
|
+ printf("DEBUG: exclude_type[%d]=%d\n", i,
|
|
+ opts->exclude_type[i]);
|
|
+ printf("DEBUG: gzip=%d\n", opts->gzip);
|
|
+ printf("DEBUG: ignore_failed_read=%d\n", opts->ignore_failed_read);
|
|
+ printf("DEBUG: no_eof=%d\n", opts->no_eof);
|
|
+ printf("DEBUG: quiet=%d\n", opts->quiet);
|
|
+ printf("DEBUG: recursive=%d\n", opts->recursive);
|
|
+ printf("DEBUG: threaded=%d\n", opts->threaded);
|
|
+ printf("DEBUG: verbose=%d\n", opts->verbose);
|
|
+ printf("DEBUG: output_file=%s\n", opts->output_file);
|
|
+ printf("DEBUG: file_timeout=%d\n", opts->file_timeout);
|
|
+ printf("DEBUG: timeout=%d\n", opts->timeout);
|
|
+ printf("DEBUG: jobs=%ld\n", opts->jobs);
|
|
+ printf("DEBUG: jobs_per_cpu=%ld\n", opts->jobs_per_cpu);
|
|
+ printf("DEBUG: file_max_size=%zu\n", opts->file_max_size);
|
|
+ printf("DEBUG: max_buffer_size=%zu\n", opts->max_buffer_size);
|
|
+ printf("DEBUG: max_size=%zu\n", opts->max_size);
|
|
+ printf("DEBUG: read_chunk_size=%zu\n", opts->read_chunk_size);
|
|
+ for (i = 0; i < opts->exclude.num; i++)
|
|
+ printf("DEBUG: exclude[%d]=%s\n", i, opts->exclude.str[i]);
|
|
+ for (i = 0; i < opts->num_specs; i++) {
|
|
+ printf("DEBUG: specs[%d]:\n", i);
|
|
+ printf("DEBUG: inname=%s\n", opts->specs[i].inname);
|
|
+ printf("DEBUG: outname=%s\n", opts->specs[i].outname);
|
|
+ printf("DEBUG: is_cmd=%d\n", opts->specs[i].is_cmd);
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Mark file type associated with character @c as excluded */
|
|
+int dump_opts_set_type_excluded(struct dump_opts *opts, char c)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < NUM_EXCLUDE_TYPES; i++) {
|
|
+ if (exclude_types[i].c == c) {
|
|
+ opts->exclude_type[i] = true;
|
|
+ return 0;
|
|
+ }
|
|
+ }
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+/* Add entry specification defined by @inname, @outname and @is_cmd to @opts. */
|
|
+void dump_opts_add_spec(struct dump_opts *opts, char *inname, char *outname,
|
|
+ bool is_cmd)
|
|
+{
|
|
+ unsigned int i = opts->num_specs;
|
|
+
|
|
+ opts->specs = mrealloc(opts->specs, (i + 1) * sizeof(struct dump_spec));
|
|
+ opts->specs[i].inname = mstrdup(inname);
|
|
+ if (outname)
|
|
+ opts->specs[i].outname = mstrdup(outname);
|
|
+ else
|
|
+ opts->specs[i].outname = NULL;
|
|
+ opts->specs[i].is_cmd = is_cmd;
|
|
+ opts->num_specs++;
|
|
+}
|
|
+
|
|
+int dump_to_tar(struct dump_opts *opts)
|
|
+{
|
|
+ struct task task;
|
|
+ int rc;
|
|
+ long num_cpus;
|
|
+
|
|
+ if (opts->jobs_per_cpu > 0) {
|
|
+ num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
|
|
+ if (num_cpus < 1) {
|
|
+ mwarn("Cannot determine number of CPUs - assuming 1 "
|
|
+ "CPU");
|
|
+ num_cpus = 1;
|
|
+ }
|
|
+ opts->jobs = num_cpus;
|
|
+ }
|
|
+
|
|
+ if (opts->jobs == 0 && (opts->timeout > 0 || opts->file_timeout > 0)) {
|
|
+ /* Separate thread needed to implement timeout via cancel */
|
|
+ opts->jobs = 1;
|
|
+ }
|
|
+
|
|
+ rc = init_task(&task, opts);
|
|
+ if (rc)
|
|
+ return rc;
|
|
+
|
|
+ /* Queue initial job */
|
|
+ init_queue(&task);
|
|
+
|
|
+ /* Process queue */
|
|
+ if (opts->jobs > 0)
|
|
+ rc = process_queue_threaded(&task);
|
|
+ else
|
|
+ rc = process_queue(&task);
|
|
+ abort_queued_jobs(&task);
|
|
+
|
|
+ if (task.output_num_files > 0 && !opts->no_eof)
|
|
+ write_eof(&task);
|
|
+
|
|
+ print_summary(&task);
|
|
+
|
|
+ close_output(&task);
|
|
+
|
|
+ if (rc == 0 && task.aborted)
|
|
+ rc = EXIT_RUNTIME;
|
|
+
|
|
+ return rc;
|
|
+}
|
|
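
dump_to_tar() and the dump_opts_*() helpers above form the programmatic interface that the command line front end in dump2tar.c drives. A minimal sketch of a direct caller follows; it is illustration only, and all paths, names and option values (including collect_debug_data() itself) are made up rather than taken from the patch.

/* Illustration only: a hypothetical direct caller of the dump API. */
#include <stdbool.h>
#include "dump.h"

int collect_debug_data(void)
{
	struct dump_opts *opts = dump_opts_new();
	int rc;

	opts->output_file = "/tmp/debug.tar";	/* example path */
	opts->ignore_failed_read = true;	/* keep going on read errors */
	opts->jobs = 4;				/* four reader threads */

	/* Archive a sysfs subtree under its own name ... */
	dump_opts_add_spec(opts, "/sys/class/net", NULL, false);
	/* ... and store command output as the entry "version.txt" */
	dump_opts_add_spec(opts, "cat /proc/version", "version.txt", true);

	rc = dump_to_tar(opts);
	dump_opts_free(opts);
	return rc;
}
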
--- /dev/null
|
|
+++ b/dump2tar/src/dump2tar.c
|
|
@@ -0,0 +1,474 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Command line interface
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#include <errno.h>
|
|
+#include <getopt.h>
|
|
+#include <stdio.h>
|
|
+#include <stdlib.h>
|
|
+#include <string.h>
|
|
+#include <time.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+#include "dump.h"
|
|
+#include "global.h"
|
|
+#include "idcache.h"
|
|
+#include "misc.h"
|
|
+#include "strarray.h"
|
|
+#include "util_opt.h"
|
|
+#include "util_prg.h"
|
|
+
|
|
+#define MIN_BUFFER_SIZE 4096
|
|
+
|
|
+#define OPT_NOSHORT_BASE 256
|
|
+
|
|
+#define OPT_DEREFERENCE (OPT_NOSHORT_BASE + 0)
|
|
+#define OPT_NORECURSION (OPT_NOSHORT_BASE + 1)
|
|
+#define OPT_EXCLUDETYPE (OPT_NOSHORT_BASE + 2)
|
|
+
|
|
+/* Program description */
|
|
+static const struct util_prg dump2tar_prg = {
|
|
+ .desc = "Use dump2tar to create a tar archive from the contents "
|
|
+ "of arbitrary files.\nIt works even when the size of actual "
|
|
+ "file content is not known beforehand,\nsuch as with FIFOs, "
|
|
+ "character devices or certain Linux debugfs or sysfs files.\n"
|
|
+ "\nYou can also add files under different names and add "
|
|
+ "command output using the\nformat described in section SPECS "
|
|
+ "below. When no additional options are\nspecified, the "
|
|
+ "resulting archive is written to the standard output stream\n"
|
|
+ "in uncompressed tar format.",
|
|
+ .args = "SPECS",
|
|
+ .copyright_vec = {
|
|
+ { "IBM Corp.", 2016, 2016 },
|
|
+ UTIL_PRG_COPYRIGHT_END
|
|
+ },
|
|
+};
|
|
+
|
|
+/* Definition of command line options */
|
|
+static struct util_opt dump2tar_opts[] = {
|
|
+ UTIL_OPT_SECTION("OUTPUT OPTIONS"),
|
|
+ {
|
|
+ .option = { "output-file", required_argument, NULL, 'o' },
|
|
+ .argument = "FILE",
|
|
+ .desc = "Write archive to FILE (default: standard output)",
|
|
+ },
|
|
+#ifdef HAVE_ZLIB
|
|
+ {
|
|
+ .option = { "gzip", no_argument, NULL, 'z' },
|
|
+ .desc = "Write a gzip compressed archive",
|
|
+ },
|
|
+#endif /* HAVE_ZLIB */
|
|
+ {
|
|
+ .option = { "max-size", required_argument, NULL, 'm' },
|
|
+ .argument = "N",
|
|
+ .desc = "Stop adding files when archive size exceeds N bytes",
|
|
+ },
|
|
+ {
|
|
+ .option = { "timeout", required_argument, NULL, 't' },
|
|
+ .argument = "SEC",
|
|
+ .desc = "Stop adding files after SEC seconds",
|
|
+ },
|
|
+ {
|
|
+ .option = { "no-eof", no_argument, NULL, 131 },
|
|
+ .desc = "Do not write an end-of-file marker",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+ {
|
|
+ .option = { "add-cmd-status", no_argument, NULL, 132 },
|
|
+ .desc = "Add status of commands as separate file",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+ {
|
|
+ .option = { "append", no_argument, NULL, 133 },
|
|
+ .desc = "Append output to end of file",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+
|
|
+ UTIL_OPT_SECTION("INPUT OPTIONS"),
|
|
+ {
|
|
+ .option = { "files-from", required_argument, NULL, 'F' },
|
|
+ .argument = "FILE",
|
|
+ .desc = "Read filenames from FILE (- for standard input)",
|
|
+ },
|
|
+ {
|
|
+ .option = { "ignore-failed-read", no_argument, NULL, 'i' },
|
|
+ .desc = "Continue after read errors",
|
|
+ },
|
|
+ {
|
|
+ .option = { "buffer-size", required_argument, NULL, 'b' },
|
|
+ .argument = "N",
|
|
+ .desc = "Read data in chunks of N byte (default: 16384)",
|
|
+ },
|
|
+ {
|
|
+ .option = { "file-timeout", required_argument, NULL, 'T' },
|
|
+ .desc = "Stop reading file after SEC seconds",
|
|
+ },
|
|
+ {
|
|
+ .option = { "file-max-size", required_argument, NULL, 'M' },
|
|
+ .argument = "N",
|
|
+ .desc = "Stop reading file after N bytes",
|
|
+ },
|
|
+ {
|
|
+ .option = { "jobs", required_argument, NULL, 'j' },
|
|
+ .argument = "N",
|
|
+ .desc = "Read N files in parallel (default: 1)",
|
|
+ },
|
|
+ {
|
|
+ .option = { "jobs-per-cpu", required_argument, NULL, 'J' },
|
|
+ .argument = "N",
|
|
+ .desc = "Read N files per CPU in parallel",
|
|
+ },
|
|
+ {
|
|
+ .option = { "exclude", required_argument, NULL, 'x' },
|
|
+ .argument = "PATTERN",
|
|
+ .desc = "Don't add files matching PATTERN",
|
|
+ },
|
|
+ {
|
|
+ .option = { "exclude-from", required_argument, NULL, 'X' },
|
|
+ .argument = "FILE",
|
|
+ .desc = "Don't add files matching patterns in FILE",
|
|
+ },
|
|
+ {
|
|
+ .option = { "exclude-type", required_argument, NULL,
|
|
+ OPT_EXCLUDETYPE },
|
|
+ .argument = "TYPE",
|
|
+ .desc = "Don't add files of specified TYPE (one of: fdcbpls)",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+ {
|
|
+ .option = { "dereference", no_argument, NULL, OPT_DEREFERENCE },
|
|
+ .desc = "Add link targets instead of links",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+ {
|
|
+ .option = { "no-recursion", no_argument, NULL,
|
|
+ OPT_NORECURSION },
|
|
+ .desc = "Don't add files from sub-directories",
|
|
+ .flags = UTIL_OPT_FLAG_NOSHORT,
|
|
+ },
|
|
+
|
|
+ UTIL_OPT_SECTION("MISC OPTIONS"),
|
|
+ UTIL_OPT_HELP,
|
|
+ UTIL_OPT_VERSION,
|
|
+ {
|
|
+ .option = { "verbose", no_argument, NULL, 'V' },
|
|
+ .desc = "Print additional informational output",
|
|
+ },
|
|
+ {
|
|
+ .option = { "quiet", no_argument, NULL, 'q' },
|
|
+ .desc = "Suppress printing of informational output",
|
|
+ },
|
|
+ UTIL_OPT_END,
|
|
+};
|
|
+
|
|
+/* Split buffer size specification in @arg into two numbers to be stored in
|
|
+ * @from_ptr and @to_ptr. Return %EXIT_OK on success. */
|
|
+static int parse_buffer_size(char *arg, size_t *from_ptr, size_t *to_ptr)
|
|
+{
|
|
+ char *err;
|
|
+ unsigned long from, to;
|
|
+
|
|
+ if (!*arg) {
|
|
+ mwarnx("Empty buffer size specified");
|
|
+ return EXIT_USAGE;
|
|
+ }
|
|
+
|
|
+ from = strtoul(arg, &err, 10);
|
|
+
|
|
+ if (*err == '-')
|
|
+ to = strtoul(err + 1, &err, 10);
|
|
+ else
|
|
+ to = *to_ptr;
|
|
+
|
|
+ if (*err) {
|
|
+ mwarnx("Invalid buffer size: %s", arg);
|
|
+ return EXIT_USAGE;
|
|
+ }
|
|
+
|
|
+ if (from < MIN_BUFFER_SIZE || to < MIN_BUFFER_SIZE) {
|
|
+ mwarnx("Buffer size too low (minimum %u)", MIN_BUFFER_SIZE);
|
|
+ return EXIT_USAGE;
|
|
+ }
|
|
+
|
|
+ if (to < from)
|
|
+ to = from;
|
|
+
|
|
+ *from_ptr = from;
|
|
+ *to_ptr = to;
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
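
In other words, "-b 65536" only raises the read chunk size (the maximum buffer size keeps its default, or is bumped up to the chunk size if it would otherwise be smaller), while a range such as "-b 65536-1048576" sets both the chunk size and the maximum buffer size explicitly; a value below MIN_BUFFER_SIZE (4096) is rejected.
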
+
|
|
+static void parse_and_add_spec(struct dump_opts *opts, const char *spec)
|
|
+{
|
|
+ char *op, *s, *inname, *outname = NULL;
|
|
+ bool is_cmd = false;
|
|
+
|
|
+ s = mstrdup(spec);
|
|
+ op = strstr(s, "|=");
|
|
+ if (op)
|
|
+ is_cmd = true;
|
|
+ else
|
|
+ op = strstr(s, ":=");
|
|
+
|
|
+ if (op) {
|
|
+ *op = 0;
|
|
+ inname = op + 2;
|
|
+ outname = s;
|
|
+ } else {
|
|
+ inname = s;
|
|
+ }
|
|
+ dump_opts_add_spec(opts, inname, outname, is_cmd);
|
|
+ free(s);
|
|
+}
|
|
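
For reference, parse_and_add_spec() maps the three accepted SPEC forms to dump_opts_add_spec() arguments as follows:

  PATH              ->  inname=PATH,     outname=NULL,     is_cmd=false
  NEWPATH:=PATH     ->  inname=PATH,     outname=NEWPATH,  is_cmd=false
  NEWPATH|=CMDLINE  ->  inname=CMDLINE,  outname=NEWPATH,  is_cmd=true
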
+
|
|
+static int add_specs_from_file(struct dump_opts *opts, const char *filename)
|
|
+{
|
|
+ FILE *fd;
|
|
+ char *line = NULL;
|
|
+ size_t line_size;
|
|
+ int rc = EXIT_RUNTIME;
|
|
+ bool need_close = false, parse_spec = true;
|
|
+
|
|
+ if (strcmp(filename, "-") == 0)
|
|
+ fd = stdin;
|
|
+ else {
|
|
+ fd = fopen(filename, "r");
|
|
+ if (!fd) {
|
|
+ mwarn("%s: Cannot open file", filename);
|
|
+ goto out;
|
|
+ }
|
|
+ need_close = true;
|
|
+ }
|
|
+
|
|
+ while ((getline(&line, &line_size, fd) != -1)) {
|
|
+ chomp(line, "\n");
|
|
+ if (line[0] == 0)
|
|
+ continue;
|
|
+ if (parse_spec && strcmp(line, "--") == 0) {
|
|
+ /* After a line containing --, no more := or |= specs
|
|
+ * are expected */
|
|
+ parse_spec = false;
|
|
+ continue;
|
|
+ }
|
|
+ if (parse_spec)
|
|
+ parse_and_add_spec(opts, line);
|
|
+ else
|
|
+ dump_opts_add_spec(opts, line, NULL, false);
|
|
+ }
|
|
+
|
|
+ if (ferror(fd))
|
|
+ mwarn("%s: Cannot read file", filename);
|
|
+ else
|
|
+ rc = EXIT_OK;
|
|
+
|
|
+out:
|
|
+ if (need_close)
|
|
+ fclose(fd);
|
|
+ free(line);
|
|
+
|
|
+ return rc;
|
|
+}
|
|
+
|
|
+static void print_help(void)
|
|
+{
|
|
+ static const struct {
|
|
+ const char *name;
|
|
+ const char *desc;
|
|
+ } specs[] = {
|
|
+ { "PATH", "Add file or directory at PATH" },
|
|
+ { "NEWPATH:=PATH", "Add file or directory at PATH as NEWPATH" },
|
|
+ { "NEWPATH|=CMDLINE", "Add output of command line CMDLINE as "
|
|
+ "NEWPATH" },
|
|
+ { NULL, NULL },
|
|
+ };
|
|
+ int i;
|
|
+
|
|
+ util_prg_print_help();
|
|
+ printf("SPECS\n");
|
|
+ for (i = 0; specs[i].name; i++)
|
|
+ util_opt_print_indented(specs[i].name, specs[i].desc);
|
|
+ printf("\n");
|
|
+ util_opt_print_help();
|
|
+}
|
|
+
|
|
+int main(int argc, char *argv[])
|
|
+{
|
|
+ int rc = EXIT_USAGE, opt;
|
|
+ long i;
|
|
+ struct dump_opts *opts;
|
|
+
|
|
+ if (getenv("DUMP2TAR_DEBUG"))
|
|
+ global_debug = true;
|
|
+
|
|
+ util_prg_init(&dump2tar_prg);
|
|
+ util_opt_init(dump2tar_opts, "-");
|
|
+ misc_init();
|
|
+
|
|
+ opts = dump_opts_new();
|
|
+ opterr = 0;
|
|
+ while ((opt = util_opt_getopt_long(argc, argv)) != -1) {
|
|
+ switch (opt) {
|
|
+ case 'h': /* --help */
|
|
+ print_help();
|
|
+ rc = EXIT_OK;
|
|
+ goto out;
|
|
+ case 'v': /* --version */
|
|
+ util_prg_print_version();
|
|
+ rc = EXIT_OK;
|
|
+ goto out;
|
|
+ case 'V': /* --verbose */
|
|
+ global_verbose = true;
|
|
+ global_quiet = false;
|
|
+ opts->verbose = true;
|
|
+ opts->quiet = false;
|
|
+ break;
|
|
+ case 'q': /* --quiet */
|
|
+ global_quiet = true;
|
|
+ global_verbose = false;
|
|
+ opts->quiet = true;
|
|
+ opts->verbose = false;
|
|
+ break;
|
|
+ case 'i': /* --ignore-failed-read */
|
|
+ opts->ignore_failed_read = true;
|
|
+ break;
|
|
+ case 'j': /* --jobs N */
|
|
+ opts->jobs = atoi(optarg);
|
|
+ if (opts->jobs < 1) {
|
|
+ mwarnx("Invalid number of jobs: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'J': /* --jobs-per-cpu N */
|
|
+ opts->jobs_per_cpu = atoi(optarg);
|
|
+ if (opts->jobs_per_cpu < 1) {
|
|
+ mwarnx("Invalid number of jobs: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'b': /* --buffer-size N */
|
|
+ if (parse_buffer_size(optarg, &opts->read_chunk_size,
|
|
+ &opts->max_buffer_size))
|
|
+ goto out;
|
|
+ break;
|
|
+ case 'x': /* --exclude PATTERN */
|
|
+ add_str_to_strarray(&opts->exclude, optarg);
|
|
+ break;
|
|
+ case 'X': /* --exclude-from FILE */
|
|
+ if (add_file_to_strarray(&opts->exclude, optarg))
|
|
+ goto out;
|
|
+ break;
|
|
+ case 'F': /* --files-from FILE */
|
|
+ if (add_specs_from_file(opts, optarg))
|
|
+ goto out;
|
|
+ break;
|
|
+ case 'o': /* --output-file FILE */
|
|
+ if (opts->output_file) {
|
|
+ mwarnx("Output file specified multiple times");
|
|
+ goto out;
|
|
+ }
|
|
+ opts->output_file = optarg;
|
|
+ break;
|
|
+ case OPT_DEREFERENCE: /* --dereference */
|
|
+ opts->dereference = true;
|
|
+ break;
|
|
+ case OPT_NORECURSION: /* --no-recursion */
|
|
+ opts->recursive = false;
|
|
+ break;
|
|
+ case OPT_EXCLUDETYPE: /* --exclude-type TYPE */
|
|
+ for (i = 0; optarg[i]; i++) {
|
|
+ if (dump_opts_set_type_excluded(opts,
|
|
+ optarg[i]))
|
|
+ break;
|
|
+
|
|
+ }
|
|
+ if (optarg[i]) {
|
|
+ mwarnx("Unrecognized file type: %c", optarg[i]);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 131: /* --no-eof */
|
|
+ opts->no_eof = true;
|
|
+ break;
|
|
+ case 132: /* --add-cmd-status */
|
|
+ opts->add_cmd_status = true;
|
|
+ break;
|
|
+ case 133: /* --append */
|
|
+ opts->append = true;
|
|
+ break;
|
|
+ case 't': /* --timeout VALUE */
|
|
+ opts->timeout = atoi(optarg);
|
|
+ if (opts->timeout < 1) {
|
|
+ mwarnx("Invalid timeout value: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'T': /* --file-timeout VALUE */
|
|
+ opts->file_timeout = atoi(optarg);
|
|
+ if (opts->file_timeout < 1) {
|
|
+ mwarnx("Invalid timeout value: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'm': /* --max-size N */
|
|
+ opts->max_size = atol(optarg);
|
|
+ if (opts->max_size < 2) {
|
|
+ mwarnx("Invalid maximum size: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'M': /* --file-max-size N */
|
|
+ opts->file_max_size = atol(optarg);
|
|
+ if (opts->file_max_size < 2) {
|
|
+ mwarnx("Invalid maximum size: %s", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ break;
|
|
+ case 'z': /* --gzip */
|
|
+ opts->gzip = true;
|
|
+ break;
|
|
+ case 1: /* Filename specification or unrecognized option */
|
|
+ if (optarg[0] == '-') {
|
|
+ mwarnx("Invalid option '%s'", optarg);
|
|
+ goto out;
|
|
+ }
|
|
+ parse_and_add_spec(opts, optarg);
|
|
+ break;
|
|
+ case '?': /* Unrecognized option */
|
|
+ if (optopt)
|
|
+ mwarnx("Invalid option '-%c'", optopt);
|
|
+ else
|
|
+ mwarnx("Invalid option '%s'", argv[optind - 1]);
|
|
+ goto out;
|
|
+ case ':': /* Missing argument */
|
|
+ mwarnx("Option '%s' requires an argument",
|
|
+ argv[optind - 1]);
|
|
+ goto out;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ if (optind >= argc && opts->num_specs == 0) {
|
|
+ mwarnx("Please specify files to dump");
|
|
+ goto out;
|
|
+ }
|
|
+
|
|
+ for (i = optind; i < argc; i++)
|
|
+ dump_opts_add_spec(opts, argv[i], NULL, false);
|
|
+
|
|
+ rc = dump_to_tar(opts);
|
|
+
|
|
+out:
|
|
+ idcache_cleanup();
|
|
+ misc_cleanup();
|
|
+ dump_opts_free(opts);
|
|
+
|
|
+ if (rc == EXIT_USAGE)
|
|
+ util_prg_print_parse_error();
|
|
+
|
|
+ return rc;
|
|
+}
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/global.c
|
|
@@ -0,0 +1,17 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Global variables
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "global.h"
|
|
+
|
|
+/* Global settings */
|
|
+bool global_threaded;
|
|
+bool global_debug;
|
|
+bool global_verbose;
|
|
+bool global_quiet;
|
|
+bool global_timestamps;
|
|
--- /dev/null
|
|
+++ b/dump2tar/src/idcache.c
|
|
@@ -0,0 +1,153 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Caches for user and group ID lookups
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#include "idcache.h"
|
|
+
|
|
+#include <grp.h>
|
|
+#include <pthread.h>
|
|
+#include <pwd.h>
|
|
+#include <stdlib.h>
|
|
+#include <string.h>
|
|
+
|
|
+#include "global.h"
|
|
+#include "misc.h"
|
|
+
|
|
+/* Maximum user and group name lengths as defined in tar header */
|
|
+#define ID_NAME_MAXLEN 32
|
|
+
|
|
+/* Types for user and group ID caches */
|
|
+typedef uid_t generic_id_t; /* Assumes that uid_t == gid_t */
|
|
+
|
|
+struct id_cache_entry {
|
|
+ generic_id_t id;
|
|
+ char name[ID_NAME_MAXLEN];
|
|
+};
|
|
+
|
|
+struct id_cache {
|
|
+ unsigned int num;
|
|
+ struct id_cache_entry entries[];
|
|
+};
|
|
+
|
|
+/* cache_mutex serializes access to cached uid and gid data */
|
|
+static pthread_mutex_t id_cache_mutex = PTHREAD_MUTEX_INITIALIZER;
|
|
+static struct id_cache *id_cache_uid;
|
|
+static struct id_cache *id_cache_gid;
|
|
+
|
|
+/* Lock cache mutex */
|
|
+static void cache_lock(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_lock(&id_cache_mutex);
|
|
+}
|
|
+
|
|
+/* Unlock cache mutex */
|
|
+static void cache_unlock(void)
|
|
+{
|
|
+ if (!global_threaded)
|
|
+ return;
|
|
+ pthread_mutex_unlock(&id_cache_mutex);
|
|
+}
|
|
+
|
|
+/* Copy the name associated with @id in @cache to at most @len bytes at @dest.
|
|
+ * Return %true if name was found in cache, %false otherwise. */
|
|
+static bool strncpy_id_cache_entry(char *dest, struct id_cache *cache,
|
|
+ generic_id_t id, size_t len)
|
|
+{
|
|
+ unsigned int i;
|
|
+ bool hit = false;
|
|
+
|
|
+ cache_lock();
|
|
+ if (cache) {
|
|
+ for (i = 0; i < cache->num; i++) {
|
|
+ if (cache->entries[i].id == id) {
|
|
+ strncpy(dest, cache->entries[i].name, len);
|
|
+ hit = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ cache_unlock();
|
|
+
|
|
+ return hit;
|
|
+}
|
|
+
|
|
+/* Add a new entry consisting of @id and @name to ID cache in @*cache_ptr.
|
|
+ * Update @cache_ptr if necessary. */
|
|
+static void add_id_cache_entry(struct id_cache **cache_ptr, generic_id_t id,
|
|
+ char *name)
|
|
+{
|
|
+ struct id_cache *cache;
|
|
+ unsigned int cache_num;
|
|
+ size_t new_size;
|
|
+ struct id_cache *new_cache;
|
|
+
|
|
+ cache_lock();
|
|
+
|
|
+ cache = *cache_ptr;
|
|
+ cache_num = cache ? cache->num : 0;
|
|
+ new_size = sizeof(struct id_cache) +
|
|
+ sizeof(struct id_cache_entry) * (cache_num + 1);
|
|
+ new_cache = mrealloc(cache, new_size);
|
|
+ if (cache_num == 0)
|
|
+ new_cache->num = 0;
|
|
+ new_cache->entries[cache_num].id = id;
|
|
+ strncpy(new_cache->entries[cache_num].name, name, ID_NAME_MAXLEN);
|
|
+ new_cache->num++;
|
|
+ *cache_ptr = new_cache;
|
|
+
|
|
+ cache_unlock();
|
|
+}
|
|
+
|
|
+/* Copy the user name corresponding to user ID @uid to at most @len bytes
|
|
+ * at @name */
|
|
+void uid_to_name(uid_t uid, char *name, size_t len)
|
|
+{
|
|
+ struct passwd pwd, *pwd_ptr;
|
|
+ char buffer[PWD_BUFFER_SIZE], *result;
|
|
+
|
|
+ if (strncpy_id_cache_entry(name, id_cache_uid, uid, len))
|
|
+ return;
|
|
+
|
|
+ /* getpwuid() can be slow so cache results */
|
|
+ getpwuid_r(uid, &pwd, buffer, PWD_BUFFER_SIZE, &pwd_ptr);
|
|
+ if (!pwd_ptr || !pwd_ptr->pw_name)
|
|
+ return;
|
|
+ result = pwd_ptr->pw_name;
|
|
+
|
|
+ add_id_cache_entry(&id_cache_uid, uid, result);
|
|
+
|
|
+ strncpy(name, result, len);
|
|
+}
|
|
+
|
|
+/* Copy the group name corresponding to group ID @gid to at most @len bytes
|
|
+ * at @name */
|
|
+void gid_to_name(gid_t gid, char *name, size_t len)
|
|
+{
|
|
+ struct group grp, *grp_ptr;
|
|
+ char buffer[GRP_BUFFER_SIZE], *result;
|
|
+
|
|
+ if (strncpy_id_cache_entry(name, id_cache_gid, gid, len))
|
|
+ return;
|
|
+
|
|
+ /* getgrgid() can be slow so cache results */
|
|
+ getgrgid_r(gid, &grp, buffer, GRP_BUFFER_SIZE, &grp_ptr);
|
|
+ if (!grp_ptr || !grp_ptr->gr_name)
|
|
+ return;
|
|
+ result = grp_ptr->gr_name;
|
|
+
|
|
+ add_id_cache_entry(&id_cache_gid, gid, result);
|
|
+
|
|
+ strncpy(name, result, len);
|
|
+}
|
|
+
|
|
+void idcache_cleanup(void)
|
|
+{
|
|
+ free(id_cache_uid);
|
|
+ free(id_cache_gid);
|
|
+}
|
|
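
ID_NAME_MAXLEN mirrors the 32-byte user and group name fields of the tar header, and the results are cached because getpwuid_r()/getgrgid_r() can be slow when they would otherwise run for every archive entry. A usage sketch follows; print_owner() and the buffer sizes are illustrative only, not part of the patch.

/* Illustration only: resolve the owner of an archive entry once,
 * using the cached lookups above. */
#include <stdio.h>
#include <sys/stat.h>
#include "idcache.h"

static void print_owner(const struct stat *st)
{
	char user[33] = "";	/* 32 = name field size in the tar header */
	char group[33] = "";

	uid_to_name(st->st_uid, user, sizeof(user) - 1);
	gid_to_name(st->st_gid, group, sizeof(group) - 1);
	printf("%s:%s\n", user, group);
}
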
--- /dev/null
|
|
+++ b/dump2tar/src/misc.c
|
|
@@ -0,0 +1,492 @@
|
|
+/*
|
|
+ * dump2tar - tool to dump files and command output into a tar archive
|
|
+ *
|
|
+ * Helper functions
|
|
+ *
|
|
+ * Copyright IBM Corp. 2016
|
|
+ */
|
|
+
|
|
+#define _GNU_SOURCE /* for program_invocation_short_name */
|
|
+
|
|
+#include "misc.h"
|
|
+
|
|
+#include <errno.h>
|
|
+#include <fcntl.h>
|
|
+#include <pthread.h>
|
|
+#include <stdarg.h>
|
|
+#include <stdio.h>
|
|
+#include <stdlib.h>
|
|
+#include <string.h>
|
|
+#include <sys/wait.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+#include "dref.h"
|
|
+#include "global.h"
|
|
+
|
|
+struct timespec main_start_ts;
|
|
+
|
|
+static pthread_key_t thread_name_key;
|
|
+static bool stdout_data;
|
|
+
|
|
+/* Write @len bytes at @addr to @fd. Return %EXIT_OK on success, %EXIT_RUNTIME
|
|
+ * otherwise. */
|
|
+int misc_write_data(int fd, char *addr, size_t len)
|
|
+{
|
|
+ ssize_t w;
|
|
+
|
|
+ while (len > 0) {
|
|
+ w = write(fd, addr, len);
|
|
+ if (w < 0)
|
|
+ return EXIT_RUNTIME;
|
|
+ len -= w;
|
|
+ addr += w;
|
|
+ }
|
|
+
|
|
+ return EXIT_OK;
|
|
+}
|
|
+
|
|
+/* Read at most @len bytes from @fd to @addr. Return the number of bytes read
|
|
+ * or %-1 on error. */
|
|
+ssize_t misc_read_data(int fd, char *addr, size_t len)
|
|
+{
|
|
+ size_t done = 0;
|
|
+ ssize_t r;
|
|
+
|
|
+ while (len > 0) {
|
|
+ r = read(fd, addr, len);
|
|
+ if (r < 0)
|
|
+ return -1;
|
|
+ if (r == 0)
|
|
+ break;
|
|
+ len -= r;
|
|
+ addr += r;
|
|
+ done += r;
|
|
+ }
|
|
+
|
|
+ return done;
|
|
+}
|
|
+
|
|
+/* Advance timespec @ts by @sec seconds and @nsec nanoseconds */
|
|
+void inc_timespec(struct timespec *ts, time_t sec, long nsec)
|
|
+{
|
|
+ ts->tv_nsec += nsec;
|
|
+ ts->tv_sec += sec;
|
|
+ if (ts->tv_nsec > NSEC_PER_SEC) {
|
|
+ ts->tv_nsec -= NSEC_PER_SEC;
|
|
+ ts->tv_sec++;
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Set timespec @ts to point to @sec seconds and @nsec nanoseconds in the
|
|
+ * future */
|
|
+void set_timespec(struct timespec *ts, time_t sec, long nsec)
|
|
+{
|
|
+ clock_gettime(CLOCK_MONOTONIC, ts);
|
|
+ inc_timespec(ts, sec, nsec);
|
|
+}
|
|
+
|
|
+/* Return true if timespec @a refers to a point in time before @b */
|
|
+bool ts_before(struct timespec *a, struct timespec *b)
|
|
+{
|
|
+ if (a->tv_sec < b->tv_sec ||
|
|
+ (a->tv_sec == b->tv_sec && a->tv_nsec < b->tv_nsec))
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+/* Store a string representing the time duration between @start and @end in
|
|
+ * at most @len bytes of @buff. */
|
|
+int snprintf_duration(char *buff, size_t len, struct timespec *start,
|
|
+ struct timespec *end)
|
|
+{
|
|
+ time_t sec;
|
|
+ long nsec, msec, s, m, h;
|
|
+
|
|
+ sec = end->tv_sec - start->tv_sec;
|
|
+ nsec = end->tv_nsec - start->tv_nsec;
|
|
+
|
|
+ if (nsec < 0) {
|
|
+ nsec += NSEC_PER_SEC;
|
|
+ sec--;
|
|
+ }
|
|
+
|
|
+ msec = nsec / NSEC_PER_MSEC;
|
|
+ s = sec % 60;
|
|
+ sec /= 60;
|
|
+ m = sec % 60;
|
|
+ sec /= 60;
|
|
+ h = sec;
|
|
+
|
|
+ if (h > 0)
|
|
+ return snprintf(buff, len, "%luh%lum%lu.%03lus", h, m, s, msec);
|
|
+ else if (m > 0)
|
|
+ return snprintf(buff, len, "%lum%lu.%03lus", m, s, msec);
|
|
+ else
|
|
+ return snprintf(buff, len, "%lu.%03lus", s, msec);
|
|
+}
|
|
+
|
|
+/* Return the name of the current thread */
|
|
+char *get_threadname(void)
|
|
+{
|
|
+ return pthread_getspecific(thread_name_key);
|
|
+}
|
|
+
|
|
+static int snprintf_timestamp(char *str, size_t size)
|
|
+{
|
|
+ struct timespec now_ts;
|
|
+
|
|
+ set_timespec(&now_ts, 0, 0);
|
|
+ now_ts.tv_sec -= main_start_ts.tv_sec;
|
|
+ now_ts.tv_nsec -= main_start_ts.tv_nsec;
|
|
+ if (now_ts.tv_nsec < 0) {
|
|
+ now_ts.tv_nsec += NSEC_PER_SEC;
|
|
+ now_ts.tv_sec--;
|
|
+ }
|
|
+
|
|
+ return snprintf(str, size, "[%3lu.%06lu] ", now_ts.tv_sec,
|
|
+ now_ts.tv_nsec / NSEC_PER_USEC);
|
|
+}
|
|
+
|
|
+/* When the DUMP2TAR_DEBUG environment variable is set, print debug info */
|
|
+void debug(const char *file, unsigned long line, const char *format, ...)
|
|
+{
|
|
+ char msg[MSG_LEN];
|
|
+ size_t off = 0;
|
|
+ int rc;
|
|
+ va_list args;
|
|
+
|
|
+ /* Debug marker */
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "DEBUG: ");
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ /* Timestamp */
|
|
+ rc = snprintf_timestamp(&msg[off], MSG_LEN - off);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ /* Thread name */
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "%s: ", get_threadname());
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ /* Message */
|
|
+ va_start(args, format);
|
|
+ rc = vsnprintf(&msg[off], MSG_LEN - off, format, args);
|
|
+ va_end(args);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ /* Call site */
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, " (%s:%lu)", file, line);
|
|
+
|
|
+out:
|
|
+ fprintf(stderr, "%s\n", msg);
|
|
+}
|
|
+
|
|
+/* Print a warning message consisting of @format and variable arguments.
|
|
+ * If @print_errno is true, also print the text corresponding to errno.
|
|
+ * We're not using err.h's warn since we want timestamps and synchronized
|
|
+ * output. */
|
|
+void _mwarn(bool print_errno, const char *format, ...)
|
|
+{
|
|
+ char msg[MSG_LEN];
|
|
+ size_t off = 0;
|
|
+ int rc;
|
|
+ va_list args;
|
|
+
|
|
+ if (global_timestamps) {
|
|
+ rc = snprintf_timestamp(&msg[off], MSG_LEN - off);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+ }
|
|
+
|
|
+ rc = snprintf(&msg[off], MSG_LEN - off, "%s: ",
|
|
+ program_invocation_short_name);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ va_start(args, format);
|
|
+ rc = vsnprintf(&msg[off], MSG_LEN - off, format, args);
|
|
+ va_end(args);
|
|
+ HANDLE_RC(rc, MSG_LEN, off, out);
|
|
+
|
|
+ if (print_errno)
|
|
+ snprintf(&msg[off], MSG_LEN - off, ": %s", strerror(errno));
|
|
+
|
|
+out:
|
|
+ fprintf(stderr, "%s\n", msg);
|
|
+}
+
+/* Provide informational output if --verbose was specified */
+void verb(const char *format, ...)
+{
+ char msg[MSG_LEN];
+ size_t off = 0;
+ int rc;
+ va_list args;
+ FILE *fd;
+
+ if (!global_verbose)
+ return;
+ if (stdout_data)
+ fd = stderr;
+ else
+ fd = stdout;
+ if (global_timestamps) {
+ rc = snprintf_timestamp(&msg[off], MSG_LEN - off);
+ HANDLE_RC(rc, MSG_LEN, off, out);
+ }
+
+ va_start(args, format);
+ rc = vsnprintf(&msg[off], MSG_LEN - off, format, args);
+ va_end(args);
+
+out:
+ fprintf(fd, "%s", msg);
+}
+
+/* Provide informational output. */
+void info(const char *format, ...)
+{
+ char msg[MSG_LEN];
+ size_t off = 0;
+ int rc;
+ va_list args;
+ FILE *fd;
+
+ if (global_quiet)
+ return;
+ if (stdout_data)
+ fd = stderr;
+ else
+ fd = stdout;
+
+ if (global_timestamps) {
+ rc = snprintf_timestamp(&msg[off], MSG_LEN - off);
+ HANDLE_RC(rc, MSG_LEN, off, out);
+ }
+
+ va_start(args, format);
+ rc = vsnprintf(&msg[off], MSG_LEN - off, format, args);
+ va_end(args);
+
+out:
+ fprintf(fd, "%s", msg);
+}
+
+/* Return a newly allocated string containing the result of formatting @fmt
+ * with the specified arguments */
+char *__masprintf(const char *func, const char *file, int line, const char *fmt,
+ ...)
+{
+ char *str;
+ va_list args;
+
+ va_start(args, fmt);
+ __util_vasprintf(func, file, line, &str, fmt, args);
+ va_end(args);
+
+ return str;
+}
+
+/* Set the internal name of the calling thread */
+void __set_threadname(const char *func, const char *file, int line,
+ const char *fmt, ...)
+{
+ char *str;
+ va_list args;
+
+ va_start(args, fmt);
+ __util_vasprintf(func, file, line, &str, fmt, args);
+ va_end(args);
+
+ pthread_setspecific(thread_name_key, str);
+}
+
+/* Clear any previously set thread name */
+void clear_threadname(void)
+{
+ void *addr = pthread_getspecific(thread_name_key);
+
+ if (addr) {
+ pthread_setspecific(thread_name_key, NULL);
+ free(addr);
+ }
+}
+
+/* Remove any number of trailing characters from @str that are in set @c */
+void chomp(char *str, char *c)
+{
+ ssize_t i;
+
+ for (i = strlen(str) - 1; i >= 0 && strchr(c, str[i]); i--)
+ str[i] = 0;
+}
+
+/* Remove any number of leading characters from @str that are in set @c */
+void lchomp(char *str, char *c)
+{
+ char *from;
+
+ for (from = str; *from && strchr(c, *from); from++)
+ ;
+ if (str != from)
+ memmove(str, from, strlen(from) + 1);
+}
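+
+/* Usage sketch (illustrative; the helper name is not part of this patch):
+ * @c names a set of characters, so a line read from a file can be trimmed
+ * on both ends like this: */
+void example_trim(char *line)
+{
+ chomp(line, " \t\n"); /* drop trailing blanks and the newline */
+ lchomp(line, " \t"); /* drop leading blanks */
+}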
+
+/* Perform a stat on the file referenced either by the absolute path @abs or
+ * by @rel relative to the directory reference @dref. Store the result in
+ * @st and return stat()'s return code. */
+int stat_file(bool dereference, const char *abs, const char *rel,
+ struct dref *dref, struct stat *st)
+{
+ int rc;
+
+ if (dref) {
+ if (dereference)
+ rc = fstatat(dref->dirfd, rel, st, 0);
+ else
+ rc = fstatat(dref->dirfd, rel, st, AT_SYMLINK_NOFOLLOW);
+ } else {
+ if (dereference)
+ rc = stat(abs, st);
+ else
+ rc = lstat(abs, st);
+ }
+
+ return rc;
+}
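+
+/* Usage sketch (illustrative; helper name and use case are examples only):
+ * without a directory reference, stat_file() falls back to plain
+ * stat()/lstat() on the absolute path. */
+int example_is_regular_file(const char *abs_path)
+{
+ struct stat st;
+
+ if (stat_file(true, abs_path, NULL, NULL, &st))
+ return 0;
+ return S_ISREG(st.st_mode);
+}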
+
+/* Fill stat buffer @st with dummy values. */
+void set_dummy_stat(struct stat *st)
+{
+ /* Fake stat */
+ memset(st, 0, sizeof(struct stat));
+ st->st_mode = S_IRUSR | S_IWUSR | S_IFREG;
+ st->st_uid = geteuid();
+ st->st_gid = getegid();
+ st->st_mtime = time(NULL);
+}
+
+/* Redirect all output streams to @fd and execute command @cmd */
+int cmd_child(int fd, char *cmd)
+{
+ char *argv[] = { "/bin/sh", "-c", NULL, NULL };
+ char *env[] = { NULL };
+
+ argv[2] = cmd;
+ if (dup2(fd, STDOUT_FILENO) == -1 || dup2(fd, STDERR_FILENO) == -1) {
+ mwarn("Could not redirect command output");
+ return EXIT_RUNTIME;
+ }
+
+ execve("/bin/sh", argv, env);
+
+ return EXIT_RUNTIME;
+}
+
+#define PIPE_READ 0
+#define PIPE_WRITE 1
+
+/* Run command @cmd as a child process and store its PID in @pid_ptr. On
+ * success, return a file descriptor that is an output pipe to the standard
+ * output and standard error streams of the child process. Return %-1 on
+ * error. */
+int cmd_open(char *cmd, pid_t *pid_ptr)
+{
+ int pfd[2];
+ pid_t pid;
+
+ if (pipe(pfd) < 0)
+ return -1;
+
+ pid = fork();
+ if (pid < 0) {
+ /* Fork error */
+ close(pfd[PIPE_READ]);
+ close(pfd[PIPE_WRITE]);
+ return -1;
+ } else if (pid == 0) {
+ /* Child process */
+ close(pfd[PIPE_READ]);
+ exit(cmd_child(pfd[PIPE_WRITE], cmd));
+ }
+
+ /* Parent process */
+ close(pfd[PIPE_WRITE]);
+
+ *pid_ptr = pid;
+
+ return pfd[PIPE_READ];
+}
+
+/* Close the file descriptor @fd and end the process with PID @pid. When
+ * not %NULL, use @status_ptr to store the resulting process status. */
+int cmd_close(int fd, pid_t pid, int *status_ptr)
+{
+ int status, rc = EXIT_OK;
+
+ close(fd);
+ kill(pid, SIGQUIT);
+ if (waitpid(pid, &status, 0) == -1) {
+ status = -errno;
+ rc = EXIT_RUNTIME;
+ }
+ if (status_ptr)
+ *status_ptr = status;
+
+ return rc;
+}
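+
+/* Usage sketch (illustrative; the helper name is an example only): run a
+ * shell command, copy its combined stdout/stderr from the returned pipe and
+ * then reap the child. */
+int example_run_cmd(char *cmd)
+{
+ char buf[4096];
+ pid_t pid;
+ int fd, status;
+ ssize_t len;
+
+ fd = cmd_open(cmd, &pid);
+ if (fd < 0)
+ return EXIT_RUNTIME;
+ while ((len = read(fd, buf, sizeof(buf))) > 0)
+ fwrite(buf, 1, len, stdout);
+
+ return cmd_close(fd, pid, &status);
+}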
+
+void misc_init(void)
+{
+ set_timespec(&main_start_ts, 0, 0);
+ pthread_key_create(&thread_name_key, free);
+ set_threadname("main");
+}
+
+void misc_cleanup(void)
+{
+ clear_threadname();
+ pthread_key_delete(thread_name_key);
+}
+
+void set_stdout_data(void)
+{
+ stdout_data = true;
+}
+
+bool starts_with(const char *str, const char *prefix)
+{
+ size_t len;
+
+ len = strlen(prefix);
+
+ if (strncmp(str, prefix, len) == 0)
+ return true;
+ return false;
+}
+
+bool ends_with(const char *str, const char *suffix)
+{
+ size_t str_len, s_len;
+
+ str_len = strlen(str);
+ s_len = strlen(suffix);
+
+ if (str_len < s_len)
+ return false;
+ if (strcmp(str + str_len - s_len, suffix) != 0)
+ return false;
+
+ return true;
+}
+
+/* Collapse sequences of consecutive slashes in @str into a single slash */
+void remove_double_slashes(char *str)
+{
+ size_t i, to;
+ char last;
+
+ last = 0;
+ for (i = 0, to = 0; str[i]; i++) {
+ if (last != '/' || str[i] != '/')
+ last = str[to++] = str[i];
+ }
+ str[to] = 0;
+}
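+
+/* Usage sketch (illustrative; the helper name is an example only): normalize
+ * a user-supplied path, e.g. "/sys//devices/" becomes "/sys/devices". */
+void example_normalize_path(char *path)
+{
+ remove_double_slashes(path);
+ if (strcmp(path, "/") != 0)
+ chomp(path, "/"); /* drop trailing slashes */
+}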
--- /dev/null
+++ b/dump2tar/src/strarray.c
@@ -0,0 +1,81 @@
+/*
+ * dump2tar - tool to dump files and command output into a tar archive
+ *
+ * Dynamically growing string arrays
+ *
+ * Copyright IBM Corp. 2016
+ */
+
+#include "strarray.h"
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "misc.h"
+
+/* Release resources associated with string array @array */
+void free_strarray(struct strarray *array)
+{
+ unsigned int i;
+
+ for (i = 0; i < array->num; i++)
+ free(array->str[i]);
+ free(array->str);
+ array->str = NULL;
+ array->num = 0;
+}
+
+/* Add string @str to string array @array */
+void add_str_to_strarray(struct strarray *array, const char *str)
+{
+ array->str = mrealloc(array->str, sizeof(char *) * (array->num + 2));
+ array->str[array->num + 1] = NULL;
+ array->str[array->num] = mstrdup(str);
+ array->num++;
+}
+
+/* Add string resulting from @fmt and additional arguments to @array */
+void add_vstr_to_strarray(struct strarray *array, const char *fmt, ...)
+{
+ va_list args;
+ char *str;
+
+ va_start(args, fmt);
+ util_vasprintf(&str, fmt, args);
+ va_end(args);
+
+ array->str = mrealloc(array->str, sizeof(char *) * (array->num + 2));
+ array->str[array->num + 1] = NULL;
+ array->str[array->num] = str;
+ array->num++;
+}
+
+/* Add all lines in file at @filename to @array */
+int add_file_to_strarray(struct strarray *array, const char *filename)
+{
+ FILE *fd;
+ char *line = NULL;
+ size_t line_size = 0;
+ int rc = EXIT_OK;
+
+ fd = fopen(filename, "r");
+ if (!fd) {
+ mwarn("%s: Cannot open file", filename);
+ return EXIT_RUNTIME;
+ }
+
+ while (!feof(fd) && !ferror(fd)) {
+ if (getline(&line, &line_size, fd) == -1)
+ continue;
+ chomp(line, "\n");
+ add_str_to_strarray(array, line);
+ }
+ if (ferror(fd))
+ rc = EXIT_RUNTIME;
+ free(line);
+
+ fclose(fd);
+
+ return rc;
+}
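+
+/* Usage sketch (illustrative; helper name and paths are examples only):
+ * a zero-initialized struct strarray is a valid empty array, matching the
+ * state that free_strarray() restores. */
+int example_build_list(const char *listfile)
+{
+ struct strarray array = { 0 };
+ int rc;
+
+ add_str_to_strarray(&array, "/proc/version");
+ add_vstr_to_strarray(&array, "/proc/%d/cmdline", 1);
+ rc = add_file_to_strarray(&array, listfile); /* one entry per line */
+ free_strarray(&array);
+
+ return rc;
+}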
--- /dev/null
+++ b/dump2tar/src/tar.c
@@ -0,0 +1,270 @@
+/*
+ * dump2tar - tool to dump files and command output into a tar archive
+ *
+ * TAR file generation
+ *
+ * Copyright IBM Corp. 2016
+ */
+
+#include "tar.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "buffer.h"
+#include "idcache.h"
+#include "misc.h"
+
+#define LONGLINK "././@LongLink"
+#define TYPE_LONGLINK 'K'
+#define TYPE_LONGNAME 'L'
+
+#define BLOCKSIZE 512
+
+/* Basic TAR header */
+struct tar_header {
+ char name[100];
+ char mode[8];
+ char uid[8];
+ char gid[8];
+ char size[12];
+ char mtime[12];
+ char chksum[8];
+ char typeflag;
+ char linkname[100];
+ char magic[6];
+ char version[2];
+ char uname[32];
+ char gname[32];
+ char devmajor[8];
+ char devminor[8];
+ char prefix[155];
+};
+
+/* Store the octal value of @value to at most @len bytes at @dest */
+static void set_octal(char *dest, size_t len, unsigned long value)
+{
+ int i;
+
+ dest[len - 1] = 0;
+ for (i = len - 2; i >= 0; i--) {
+ dest[i] = '0' + (value & 7);
+ value >>= 3;
+ }
+}
+
+/* Store time @value to at most @len bytes at @dest */
+static void set_time(char *dest, size_t len, time_t value)
+{
+ time_t max = (1ULL << (3 * (len - 1))) - 1;
+
+ if (value >= 0 && value <= max) {
+ set_octal(dest, len, value);
+ return;
+ }
+
+ for (; len > 0; len--) {
+ dest[len - 1] = value & 0xff;
+ value >>= 8;
+ }
+
+ dest[0] |= 0x80;
+}
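+
+/* Worked example (for illustration): with len == 12, set_octal() stores a
+ * value as 11 octal digits plus a trailing NUL, so a 1 MiB size becomes
+ * "00004000000". set_time() uses the same encoding while the value fits
+ * (here up to 2^33 - 1); larger or negative values fall back to the
+ * base-256 form, marked by setting bit 0x80 in the first byte. */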
+
+#define SET_FIELD(obj, name, value) \
+ set_octal((obj)->name, sizeof((obj)->name), (unsigned long) (value))
+#define SET_TIME_FIELD(obj, name, value) \
+ set_time((obj)->name, sizeof((obj)->name), (time_t) (value))
+#define SET_STR_FIELD(obj, name, value) \
+ strncpy((obj)->name, (value), sizeof((obj)->name))
+
+/* Initialize the tar file @header with the provided data */
+static void init_header(struct tar_header *header, const char *filename,
+ const char *link, size_t len, struct stat *stat,
+ char type)
+{
+ unsigned int i, checksum;
+ unsigned char *c;
+
+ memset(header, 0, sizeof(*header));
+
+ /* Fill in header fields */
+ SET_STR_FIELD(header, name, filename);
+ if (link)
+ SET_STR_FIELD(header, linkname, link);
+ SET_FIELD(header, size, len);
+ if (stat) {
+ SET_FIELD(header, mode, stat->st_mode & 07777);
+ SET_FIELD(header, uid, stat->st_uid);
+ SET_FIELD(header, gid, stat->st_gid);
+ SET_TIME_FIELD(header, mtime, stat->st_mtime);
+ uid_to_name(stat->st_uid, header->uname, sizeof(header->uname));
+ gid_to_name(stat->st_gid, header->gname, sizeof(header->gname));
+ } else {
+ SET_FIELD(header, mode, 0644);
+ SET_FIELD(header, uid, 0);
+ SET_FIELD(header, gid, 0);
+ SET_TIME_FIELD(header, mtime, 0);
+ uid_to_name(0, header->uname, sizeof(header->uname));
+ gid_to_name(0, header->gname, sizeof(header->gname));
+ }
+ header->typeflag = type;
+ memcpy(header->magic, "ustar ", sizeof(header->magic));
+ memcpy(header->version, " ", sizeof(header->version));
+
+ /* Calculate checksum */
+ memset(header->chksum, ' ', sizeof(header->chksum));
+ checksum = 0;
+ c = (unsigned char *) header;
+ for (i = 0; i < sizeof(*header); i++)
+ checksum += c[i];
+ snprintf(header->chksum, 7, "%06o", checksum);
+}
+
+/* Emit zero bytes via @emit_cb to pad @len to a multiple of BLOCKSIZE */
+static int emit_padding(emit_cb_t emit_cb, void *data, size_t len)
+{
+ size_t pad = BLOCKSIZE - len % BLOCKSIZE;
+ char zeroes[BLOCKSIZE];
+
+ if (len % BLOCKSIZE > 0) {
+ memset(zeroes, 0, BLOCKSIZE);
+ return emit_cb(data, zeroes, pad);
+ }
+
+ return 0;
+}
+
+/* Emit @len bytes at @addr via @emit_cb and pad data to BLOCKSIZE with zero
+ * bytes */
+static int emit_data(emit_cb_t emit_cb, void *data, void *addr, size_t len)
+{
+ int rc;
+
+ if (len == 0)
+ return 0;
+
+ rc = emit_cb(data, addr, len);
+ if (rc)
+ return rc;
+ return emit_padding(emit_cb, data, len);
+}
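+
+/* Worked example (for illustration): a 700-byte file occupies two 512-byte
+ * blocks in the archive. emit_data() forwards the 700 content bytes and
+ * emit_padding() appends the remaining 324 zero bytes (512 - 700 % 512);
+ * content that is already block-aligned gets no padding. */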
+
+/* Emit a tar header via @emit_cb */
+static int emit_header(emit_cb_t emit_cb, void *data, char *filename,
+ char *link, size_t len, struct stat *stat, char type)
+{
+ struct tar_header header;
+ size_t namelen = strlen(filename);
+ size_t linklen;
+ int rc;
+
+ /* /proc can contain unreadable links which cause tar to complain
+ * during extraction - use a dummy value to handle this more gracefully */
+ if (link && !*link)
+ link = " ";
+
+ linklen = link ? strlen(link) : 0;
+ if (linklen > sizeof(header.linkname)) {
+ rc = emit_header(emit_cb, data, LONGLINK, NULL, linklen + 1,
+ NULL, TYPE_LONGLINK);
+ if (rc)
+ return rc;
+ rc = emit_data(emit_cb, data, link, linklen + 1);
+ if (rc)
+ return rc;
+ }
+ if (namelen > sizeof(header.name)) {
+ rc = emit_header(emit_cb, data, LONGLINK, NULL, namelen + 1,
+ NULL, TYPE_LONGNAME);
+ if (rc)
+ return rc;
+ rc = emit_data(emit_cb, data, filename, namelen + 1);
+ if (rc)
+ return rc;
+ }
+
+ init_header(&header, filename, link, len, stat, type);
+ return emit_data(emit_cb, data, &header, sizeof(header));
+}
+
+struct emit_content_cb_data {
+ emit_cb_t emit_cb;
+ void *data;
+ size_t len;
+ int rc;
+};
+
+/* Callback for emitting a single chunk of data of a buffer */
+static int emit_content_cb(void *data, void *addr, size_t len)
+{
+ struct emit_content_cb_data *cb_data = data;
+
+ if (len > cb_data->len)
+ len = cb_data->len;
+ cb_data->len -= len;
+
+ cb_data->rc = cb_data->emit_cb(cb_data->data, addr, len);
+
+ if (cb_data->rc || cb_data->len == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Emit at most @len bytes of contents of @buffer via @emit_cb and pad output
+ * to BLOCKSIZE with zero bytes */
+static int emit_content(emit_cb_t emit_cb, void *data, struct buffer *buffer,
+ size_t len)
+{
+ struct emit_content_cb_data cb_data;
+
+ cb_data.emit_cb = emit_cb;
+ cb_data.data = data;
+ cb_data.len = len;
+ cb_data.rc = 0;
+ buffer_iterate(buffer, emit_content_cb, &cb_data);
+ if (cb_data.rc)
+ return cb_data.rc;
+
+ return emit_padding(emit_cb, data, buffer->total);
+}
+
+/* Convert file meta data and content specified as @content into a
+ * stream of bytes that is reported via the @emit_cb callback. @data is
+ * passed through to the callback for arbitrary use. */
+int tar_emit_file_from_buffer(char *filename, char *link, size_t len,
+ struct stat *stat, char type,
+ struct buffer *content, emit_cb_t emit_cb,
+ void *data)
+{
+ int rc;
+
+ DBG("emit tar file=%s type=%d len=%zu", filename, type, len);
+ rc = emit_header(emit_cb, data, filename, link, len, stat, type);
+ if (rc)
+ return rc;
+ if (content)
+ rc = emit_content(emit_cb, data, content, len);
+
+ return rc;
+}
+
+/* Convert file meta data and content specified as @addr and @len into a
+ * stream of bytes that is reported via the @emit_cb callback. @data is
+ * passed through to the callback for arbitrary use. */
+int tar_emit_file_from_data(char *filename, char *link, size_t len,
+ struct stat *stat, char type, void *addr,
+ emit_cb_t emit_cb, void *data)
+{
+ int rc;
+
+ DBG("emit tar file=%s type=%d len=%zu", filename, type, len);
+ rc = emit_header(emit_cb, data, filename, link, len, stat, type);
+ if (rc)
+ return rc;
+ if (addr)
+ rc = emit_data(emit_cb, data, addr, len);
+
+ return rc;
+}
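+
+/* Usage sketch (illustrative; callback and helper names are examples only):
+ * emit a single in-memory file as a tar member by writing the byte stream
+ * to a stdio stream. */
+static int example_emit_to_stream(void *data, void *addr, size_t len)
+{
+ return fwrite(addr, 1, len, (FILE *) data) == len ? 0 : -1;
+}
+
+int example_write_member(FILE *file)
+{
+ struct stat st;
+ char content[] = "hello\n";
+
+ set_dummy_stat(&st); /* see misc.c */
+ return tar_emit_file_from_data("example.txt", NULL,
+ sizeof(content) - 1, &st, '0', content,
+ example_emit_to_stream, file);
+}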