SHA256
1
0
forked from pool/lvm2

Accepting request 426139 from home:scarabeus_iv:branches:Base:System

- This is sync commit from Leap/SLE12 only
- Add patch pvcreate-enhance-the-error-message.patch bsc#960744
- Modify GPL-2.0 to GPL-2.0+ and LGPL-2.1 to LGPL-2.1+ to avoid
  license conflict with thin-provisioning-tools which is using GPL-3.0
- Also contains fix for bsc#969310
- Fix clvmd.ocf and cmirrord to remove "-d" option for cmirrod
  (bsc#971334)
- Fix clvmd.ocf to add lvmconf --enable-cluster before start daemon
  when locking_type is not 3 or use_lvmetad is 1 in setting
  (bsc#970439)
- Modified spec to enable blkid-wiping (fate#319908)
- Fix clvmd binary not found in case that users still use RA from
  ocf:lvm2:clvm (bsc#980200) 
  Add sbindir=$HA_SBIN_DIR for clvmd.ocf and cmirrord.ocf
- The bsc#979635 and bsc#991181 are fixed in past thanks to proper /usr
  migration code
- Modified raid10_segtype_default from "mirror" to "raid10"(bsc#982329)
- Remove lvm2-clvmd/cmirrord.service and related activation services
  from %service_add _pre/post/preun/postun because we start clvmd 
  /clmirrord and activate via pacemaker and RA. (bsc#980296)
- Lvchange improve refresh by trying to deactivate snapshot thinLV 
  in case it's preventing merge process change integrated upstream.
  (bsc#984321)
- Fixed in past bsc#992843
- Fixed by upstream bsc#984321
- Fixed by upstream bsc#970943
- 69-dm-lvm-metad.rules: Do not process rules for multipath
  devices (bsc#990538, bsc#986734)
  Add: 69-dm-lvm-metad.rules-Do-not-process-rules-for-multi.patch
- Rewrite patches to include patch header:

OBS-URL: https://build.opensuse.org/request/show/426139
OBS-URL: https://build.opensuse.org/package/show/Base:System/lvm2?expand=0&rev=158
This commit is contained in:
2016-09-14 09:43:37 +00:00
committed by Git OBS Bridge
parent cd69f1258b
commit 15510c0f84
16 changed files with 702 additions and 106 deletions

436
lvm.conf
View File

@@ -23,6 +23,9 @@ config {
# If enabled, any configuration mismatch aborts the LVM2 process.
abort_on_errors = 0
# Directory where LVM looks for configuration profiles.
profile_dir = "/etc/lvm/profile"
}
# This section allows you to configure which block devices should
@@ -50,11 +53,30 @@ devices {
# same block device and the tools need to display a name for device,
# all the pathnames are matched against each item in the following
# list of regular expressions in turn and the first match is used.
preferred_names = [ ]
# By default no preferred names are defined.
# preferred_names = [ ]
# Try to avoid using undescriptive /dev/dm-N names, if present.
# preferred_names = [ "^/dev/mpath/", "^/dev/mapper/mpath", "^/dev/[hs]d" ]
# In case no preferred name matches or if preferred_names are not
# defined at all, builtin rules are used to determine the preference.
#
# The first builtin rule checks path prefixes and it gives preference
# based on this ordering (where "dev" depends on devices/dev setting):
# /dev/mapper > /dev/disk > /dev/dm-* > /dev/block
#
# If the ordering above cannot be applied, the path with fewer slashes
# gets preference then.
#
# If the number of slashes is the same, a symlink gets preference.
#
# Finally, if all the rules mentioned above are not applicable,
# lexicographical order is used over paths and the smallest one
# of all gets preference.
# A filter that tells LVM2 to only use a restricted set of devices.
# The filter consists of an array of regular expressions. These
# expressions can be delimited by a character of your choice, and
@@ -72,7 +94,7 @@ devices {
# accepted.
# By default we accept every block device except udev names, floppy and cdrom drives:
filter = [ "r|/dev/.*/by-path/.*|", "r|/dev/.*/by-id/.*|","r|/dev/fd.*|", "r|/dev/cdrom|", "a/.*/" ]
filter = [ "r|/dev/.*/by-path/.*|", "r|/dev/.*/by-id/.*|", "r|/dev/fd.*|", "r|/dev/cdrom|", "a/.*/" ]
# Exclude the cdrom drive
# filter = [ "r|/dev/cdrom|" ]
@@ -86,7 +108,7 @@ devices {
# Use anchors if you want to be really specific
# filter = [ "a|^/dev/hda8$|", "r/.*/" ]
# Since "filter" is often overriden from command line, it is not suitable
# Since "filter" is often overridden from command line, it is not suitable
# for system-wide device filtering (udev rules, lvmetad). To hide devices
# from LVM-specific udev processing and/or from lvmetad, you need to set
# global_filter. The syntax is the same as for normal "filter"
@@ -119,7 +141,7 @@ devices {
# If sysfs is mounted (2.6 kernels) restrict device scanning to
# the block devices it believes are valid.
# 1 enables; 0 disables.
sysfs_scan = 1
sysfs_scan = 1
# By default, LVM2 will ignore devices used as component paths
# of device-mapper multipath devices.
@@ -174,6 +196,35 @@ devices {
# in recovery situations.
ignore_suspended_devices = 0
# ignore_lvm_mirrors: Introduced in version 2.02.104
# This setting determines whether logical volumes of "mirror" segment
# type are scanned for LVM labels. This affects the ability of
# mirrors to be used as physical volumes. If 'ignore_lvm_mirrors'
# is set to '1', it becomes impossible to create volume groups on top
# of mirror logical volumes - i.e. to stack volume groups on mirrors.
#
# Allowing mirror logical volumes to be scanned (setting the value to '0')
# can potentially cause LVM processes and I/O to the mirror to become
# blocked. This is due to the way that the "mirror" segment type handles
# failures. In order for the hang to manifest itself, an LVM command must
# be run just after a failure and before the automatic LVM repair process
# takes place OR there must be failures in multiple mirrors in the same
# volume group at the same time with write failures occurring moments
# before a scan of the mirror's labels.
#
# Note that these scanning limitations do not apply to the LVM RAID
# types, like "raid1". The RAID segment types handle failures in a
# different way and are not subject to possible process or I/O blocking.
#
# It is encouraged that users set 'ignore_lvm_mirrors' to 1 if they
# are using the "mirror" segment type. Users that require volume group
# stacking on mirrored logical volumes should consider using the "raid1"
# segment type. The "raid1" segment type is not available for
# active/active clustered volume groups.
#
# Set to 1 to disallow stacking and thereby avoid a possible deadlock.
ignore_lvm_mirrors = 1
# During each LVM operation errors received from each device are counted.
# If the counter of a particular device exceeds the limit set here, no
# further I/O is sent to that device for the remainder of the respective
@@ -231,24 +282,78 @@ allocation {
# algorithm.
maximise_cling = 1
# Whether to use blkid library instead of native LVM2 code to detect
# any existing signatures while creating new Physical Volumes and
# Logical Volumes. LVM2 needs to be compiled with blkid wiping support
# for this setting to take effect.
#
# LVM2 native detection code is currently able to recognize these signatures:
# - MD device signature
# - swap signature
# - LUKS signature
# To see the list of signatures recognized by blkid, check the output
# of 'blkid -k' command. The blkid can recognize more signatures than
# LVM2 native detection code, but due to this higher number of signatures
# to be recognized, it can take more time to complete the signature scan.
use_blkid_wiping = 1
# Set to 1 to wipe any signatures found on newly-created Logical Volumes
# automatically in addition to zeroing of the first KB on the LV
# (controlled by the -Z/--zero y option).
# The command line option -W/--wipesignatures takes precedence over this
# setting.
# The default is to wipe signatures when zeroing.
#
wipe_signatures_when_zeroing_new_lvs = 1
# Set to 1 to guarantee that mirror logs will always be placed on
# different PVs from the mirror images. This was the default
# until version 2.02.85.
mirror_logs_require_separate_pvs = 0
# Set to 1 to guarantee that cache_pool metadata will always be
# placed on different PVs from the cache_pool data.
cache_pool_metadata_require_separate_pvs = 0
# Specify the minimal chunk size (in kiB) for cache pool volumes.
# Using a chunk_size that is too large can result in wasteful use of
# the cache, where small reads and writes can cause large sections of
# an LV to be mapped into the cache. However, choosing a chunk_size
# that is too small can result in more overhead trying to manage the
# numerous chunks that become mapped into the cache. The former is
# more of a problem than the latter in most cases, so we default to
# a value that is on the smaller end of the spectrum. Supported values
# range from 32(kiB) to 1048576 in multiples of 32.
# cache_pool_chunk_size = 64
# Set to 1 to guarantee that thin pool metadata will always
# be placed on different PVs from the pool data.
thin_pool_metadata_require_separate_pvs = 0
# Specify chunk size calculation policy for thin pool volumes.
# Possible options are:
# "generic" - if thin_pool_chunk_size is defined, use it.
# Otherwise, calculate the chunk size based on
# estimation and device hints exposed in sysfs:
# the minimum_io_size. The chunk size is always
# at least 64KiB.
#
# "performance" - if thin_pool_chunk_size is defined, use it.
# Otherwise, calculate the chunk size for
# performance based on device hints exposed in
# sysfs: the optimal_io_size. The chunk size is
# always at least 512KiB.
# thin_pool_chunk_size_policy = "generic"
# Specify the minimal chunk size (in KB) for thin pool volumes.
# Use of the larger chunk size may improve perfomance for plain
# Use of the larger chunk size may improve performance for plain
# thin volumes, however using them for snapshot volumes is less efficient,
# as it consumes more space and takes extra time for copying.
# When unset, lvm tries to estimate chunk size starting from 64KB
# Supported values are in range from 64 to 1048576.
# thin_pool_chunk_size = 64
# Specify discards behavior of the thin pool volume.
# Specify discards behaviour of the thin pool volume.
# Select one of "ignore", "nopassdown", "passdown"
# thin_pool_discards = "passdown"
@@ -293,7 +398,7 @@ log {
# There are 6 syslog-like log levels currently in use - 2 to 7 inclusive.
# 7 is the most verbose (LOG_DEBUG).
level = 0
# Format of output messages
# Whether or not (1 or 0) to indent messages according to their severity
indent = 1
@@ -321,14 +426,13 @@ log {
# memory, devices, activation, allocation, lvmetad, metadata, cache,
# locking
# Use "all" to see everything.
debug_classes = [ "memory", "devices", "activation", "allocation",
"lvmetad", "metadata", "cache", "locking" ]
debug_classes = [ "memory", "devices", "activation", "allocation", "lvmetad", "metadata", "cache", "locking" ]
}
# Configuration of metadata backups and archiving. In LVM2 when we
# talk about a 'backup' we mean making a copy of the metadata for the
# *current* system. The 'archive' contains old metadata configurations.
# Backups are stored in a human readeable text format.
# Backups are stored in a human readable text format.
backup {
# Should we maintain a backup of the current metadata configuration ?
@@ -366,7 +470,6 @@ shell {
# Miscellaneous global LVM2 settings
global {
# The file creation mask for any files and directories created.
# Interpreted as octal if the first digit is zero.
umask = 077
@@ -389,6 +492,11 @@ global {
# temporarily until you update them.
si_unit_consistency = 1
# Whether or not to display unit suffix for sizes. This setting has
# no effect if the units are in human-readable form (global/units="h")
# in which case the suffix is always displayed.
suffix = 1
# Whether or not to communicate with the kernel device-mapper.
# Set to 0 if you want to use the tools to manipulate LVM metadata
# without activating any logical volumes.
@@ -421,6 +529,19 @@ global {
# Type 3 uses built-in clustered locking.
# Type 4 uses read-only locking which forbids any operations that might
# change metadata.
# Type 5 offers dummy locking for tools that do not need any locks.
# You should not need to set this directly: the tools will select when
# to use it instead of the configured locking_type. Do not use lvmetad or
# the kernel device-mapper driver with this locking type.
# It is used by the --readonly option that offers read-only access to
# Volume Group metadata that cannot be locked safely because it belongs to
# an inaccessible domain and might be in use, for example a virtual machine
# image or a disk that is shared by a clustered machine.
#
# N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
# supported in clustered environment. If use_lvmetad=1 and locking_type=3
# is set at the same time, LVM always issues a warning message about this
# and then it automatically disables lvmetad use.
locking_type = 1
# Set to 0 to fail when a lock request cannot be satisfied immediately.
@@ -516,11 +637,11 @@ global {
# "mirror" - LVM will layer the 'mirror' and 'stripe' segment types. It
# will do this by creating a mirror on top of striped sub-LVs;
# effectively creating a RAID 0+1 array. This is suboptimal
# in terms of providing redunancy and performance. Changing to
# in terms of providing redundancy and performance. Changing to
# this setting is not advised.
# Specify the '--type <raid10|mirror>' option to override this default
# setting.
raid10_segtype_default = "mirror"
raid10_segtype_default = "raid10"
# The default format for displaying LV names in lvdisplay was changed
# in version 2.02.89 to show the LV name and path separately.
@@ -532,15 +653,38 @@ global {
# Whether to use (trust) a running instance of lvmetad. If this is set to
# 0, all commands fall back to the usual scanning mechanisms. When set to 1
# *and* when lvmetad is running (it is not auto-started), the volume group
# metadata and PV state flags are obtained from the lvmetad instance and no
# scanning is done by the individual commands. In a setup with lvmetad,
# lvmetad udev rules *must* be set up for LVM to work correctly. Without
# proper udev rules, all changes in block device configuration will be
# *ignored* until a manual 'pvscan --cache' is performed.
# *and* when lvmetad is running (automatically instantiated by making use of
# systemd's socket-based service activation or run as an initscripts service
# or run manually), the volume group metadata and PV state flags are obtained
# from the lvmetad instance and no scanning is done by the individual
# commands. In a setup with lvmetad, lvmetad udev rules *must* be set up for
# LVM to work correctly. Without proper udev rules, all changes in block
# device configuration will be *ignored* until a manual 'pvscan --cache'
# is performed. These rules are installed by default.
#
# If lvmetad has been running while use_lvmetad was 0, it MUST be stopped
# before changing use_lvmetad to 1 and started again afterwards.
#
# If using lvmetad, the volume activation is also switched to automatic
# event-based mode. In this mode, the volumes are activated based on
# incoming udev events that automatically inform lvmetad about new PVs
# that appear in the system. Once the VG is complete (all the PVs are
# present), it is auto-activated. The activation/auto_activation_volume_list
# setting controls which volumes are auto-activated (all by default).
#
# A note about device filtering while lvmetad is used:
# When lvmetad is updated (either automatically based on udev events
# or directly by pvscan --cache <device> call), the devices/filter
# is ignored and all devices are scanned by default. The lvmetad always
# keeps unfiltered information which is then provided to LVM commands
# and then each LVM command does the filtering based on devices/filter
# setting itself.
# To prevent scanning devices completely, even when using lvmetad,
# the devices/global_filter must be used.
# N.B. Don't use lvmetad with locking type 3 as lvmetad is not yet
# supported in clustered environment. If use_lvmetad=1 and locking_type=3
# is set at the same time, LVM always issues a warning message about this
# and then it automatically disables lvmetad use.
# If use_lvmetad set to 1, please make sure lvm2-lvmetad.socket is started
use_lvmetad = 0
@@ -553,11 +697,31 @@ global {
# The thin tools are available as part of the device-mapper-persistent-data
# package from https://github.com/jthornber/thin-provisioning-tools.
#
thin_check_executable = ""
# thin_check_executable = "/usr/sbin/thin_check"
# String with options passed with thin_check command. By default,
# option '-q' is for quiet output.
thin_check_options = [ "-q" ]
# Array of string options passed with thin_check command. By default,
# option "-q" is for quiet output.
# With thin_check version 2.1 or newer you can add "--ignore-non-fatal-errors"
# to let it pass through ignorable errors and fix them later.
# With thin_check version 3.2 or newer you should add
# "--clear-needs-check-flag".
#
# thin_check_options = [ "-q", "--clear-needs-check-flag" ]
# Full path of the utility called to repair a thin metadata device
# is in a state that allows it to be used.
# Each time a thin pool needs repair this utility is executed.
# See thin_check_executable how to obtain binaries.
#
# thin_repair_executable = "/usr/sbin/thin_repair"
# Array of extra string options passed with thin_repair command.
# thin_repair_options = [ "" ]
# Full path of the utility called to dump thin metadata content.
# See thin_check_executable how to obtain binaries.
#
# thin_dump_executable = "/usr/sbin/thin_dump"
# If set, given features are not used by thin driver.
# This can be helpful not just for testing, but i.e. allows to avoid
@@ -566,8 +730,41 @@ global {
# block_size
# discards
# discards_non_power_2
# external_origin
# metadata_resize
# external_origin_extend
#
# thin_disabled_features = [ "discards", "block_size" ]
# Full path of the utility called to check that a cache metadata device
# is in a state that allows it to be used.
# Each time a cached LV needs to be used or after it is deactivated
# this utility is executed. The activation will only proceed if the utility
# has an exit status of 0.
# Set to "" to skip this check. (Not recommended.)
# The cache tools are available as part of the device-mapper-persistent-data
# package from https://github.com/jthornber/thin-provisioning-tools.
#
# cache_check_executable = "autodetect"
# Array of string options passed with cache_check command. By default,
# option "-q" is for quiet output.
#
# cache_check_options = [ "-q" ]
# Full path of the utility called to repair a cache metadata device.
# Each time a cache metadata needs repair this utility is executed.
# See cache_check_executable how to obtain binaries.
#
# cache_repair_executable = "autodetect"
# Array of extra string options passed with cache_repair command.
# cache_repair_options = [ "" ]
# Full path of the utility called to dump cache metadata content.
# See cache_check_executable how to obtain binaries.
#
# cache_dump_executable = "autodetect"
}
activation {
@@ -641,9 +838,36 @@ activation {
# volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
# If auto_activation_volume_list is defined, each LV that is to be
# activated with the autoactivation option (--activate ay/-a ay)
# is first checked against the list. If it does not match, the LV
# is not activated. This list is checked as well as volume_list.
# activated with the autoactivation option (--activate ay/-a ay) is
# first checked against the list. There are two scenarios in which
# the autoactivation option is used:
#
# - automatic activation of volumes based on incoming PVs. If all the
# PVs making up a VG are present in the system, the autoactivation
# is triggered. This requires lvmetad (global/use_lvmetad=1) and udev
# to be running. In this case, "pvscan --cache -aay" is called
# automatically without any user intervention while processing
# udev events. Please, make sure you define auto_activation_volume_list
# properly so only the volumes you want and expect are autoactivated.
#
# - direct activation on command line with the autoactivation option.
# In this case, the user calls "vgchange --activate ay/-a ay" or
# "lvchange --activate ay/-a ay" directly.
#
# By default, the auto_activation_volume_list is not defined and all
# volumes will be activated either automatically or by using --activate ay/-a ay.
#
# N.B. The "activation/volume_list" is still honoured in all cases so even
# if the VG/LV passes the auto_activation_volume_list, it still needs to
# pass the volume_list for it to be activated in the end.
# If auto_activation_volume_list is defined but empty, no volumes will be
# activated automatically and --activate ay/-a ay will do nothing.
#
# auto_activation_volume_list = []
# If auto_activation_volume_list is defined and it's not empty, only matching
# volumes will be activated either automatically or by using --activate ay/-a ay.
#
# "vgname" and "vgname/lvname" are matched exactly.
# "@tag" matches any tag set in the LV or VG.
@@ -662,8 +886,15 @@ activation {
#
# read_only_volume_list = [ "vg1", "vg2/lvol1", "@tag1", "@*" ]
# Each LV can have an 'activation skip' flag stored persistently against it.
# During activation, this flag is used to decide whether such an LV is skipped.
# The 'activation skip' flag can be set during LV creation and by default it
# is automatically set for thin snapshot LVs. The 'auto_set_activation_skip'
# enables or disables this automatic setting of the flag while LVs are created.
# auto_set_activation_skip = 1
# For RAID or 'mirror' segment types, 'raid_region_size' is the
# size (in kiB) of each:
# size (in KiB) of each:
# - synchronization operation when initializing
# - each copy operation when performing a 'pvmove' (using 'mirror' segtype)
# This setting has replaced 'mirror_region_size' since version 2.02.99
@@ -733,7 +964,6 @@ activation {
# since it would break the redundant nature of the mirror. This
# policy acts like "remove" if no suitable device and space can
# be allocated for the replacement.
mirror_log_fault_policy = "allocate"
mirror_image_fault_policy = "remove"
@@ -801,8 +1031,140 @@ activation {
# are no progress reports, but the process is awoken immediately the
# operation is complete.
polling_interval = 15
# 'activation_mode' determines how Logical Volumes are activated if
# any devices are missing. Possible settings are:
#
# "complete" - Only allow activation of an LV if all of the Physical
# Volumes it uses are present. Other PVs in the Volume
# Group may be missing.
#
# "degraded" - Like "complete", but additionally RAID Logical Volumes of
# segment type raid1, raid4, raid5, raid6 and raid10 will
# be activated if there is no data loss, i.e. they have
# sufficient redundancy to present the entire addressable
# range of the Logical Volume.
#
# "partial" - Allows the activation of any Logical Volume even if
# a missing or failed PV could cause data loss with a
# portion of the Logical Volume inaccessible.
# This setting should not normally be used, but may
# sometimes assist with data recovery.
#
# This setting was introduced in LVM version 2.02.108. It corresponds
# with the '--activationmode' option for lvchange and vgchange.
activation_mode = "degraded"
}
# Report settings.
#
# report {
# Align columns on report output.
# aligned=1
# When buffered reporting is used, the report's content is appended
# incrementally to include each object being reported until the report
# is flushed to output which normally happens at the end of command
# execution. Otherwise, if buffering is not used, each object is
# reported as soon as its processing is finished.
# buffered=1
# Show headings for columns on report.
# headings=1
# A separator to use on report after each field.
# separator=" "
# A separator to use for list items when reported.
# list_item_separator=","
# Use a field name prefix for each field reported.
# prefixes=0
# Quote field values when using field name prefixes.
# quoted=1
# Output each column as a row. If set, this also implies report/prefixes=1.
# columns_as_rows=0
# Use binary values "0" or "1" instead of descriptive literal values for
# columns that have exactly two valid values to report (not counting the
# "unknown" value which denotes that the value could not be determined).
#
# binary_values_as_numeric = 0
# Comma separated list of columns to sort by when reporting 'lvm devtypes' command.
# See 'lvm devtypes -o help' for the list of possible fields.
# devtypes_sort="devtype_name"
# Comma separated list of columns to report for 'lvm devtypes' command.
# See 'lvm devtypes -o help' for the list of possible fields.
# devtypes_cols="devtype_name,devtype_max_partitions,devtype_description"
# Comma separated list of columns to report for 'lvm devtypes' command in verbose mode.
# See 'lvm devtypes -o help' for the list of possible fields.
# devtypes_cols_verbose="devtype_name,devtype_max_partitions,devtype_description"
# Comma separated list of columns to sort by when reporting 'lvs' command.
# See 'lvs -o help' for the list of possible fields.
# lvs_sort="vg_name,lv_name"
# Comma separated list of columns to report for 'lvs' command.
# See 'lvs -o help' for the list of possible fields.
# lvs_cols="lv_name,vg_name,lv_attr,lv_size,pool_lv,origin,data_percent,metadata_percent,move_pv,mirror_log,copy_percent,convert_lv"
# Comma separated list of columns to report for 'lvs' command in verbose mode.
# See 'lvs -o help' for the list of possible fields.
# lvs_cols_verbose="lv_name,vg_name,seg_count,lv_attr,lv_size,lv_major,lv_minor,lv_kernel_major,lv_kernel_minor,pool_lv,origin,data_percent,metadata_percent,move_pv,copy_percent,mirror_log,convert
# Comma separated list of columns to sort by when reporting 'vgs' command.
# See 'vgs -o help' for the list of possible fields.
# vgs_sort="vg_name"
# Comma separated list of columns to report for 'vgs' command.
# See 'vgs -o help' for the list of possible fields.
# vgs_cols="vg_name,pv_count,lv_count,snap_count,vg_attr,vg_size,vg_free"
# Comma separated list of columns to report for 'vgs' command in verbose mode.
# See 'vgs -o help' for the list of possible fields.
# vgs_cols_verbose="vg_name,vg_attr,vg_extent_size,pv_count,lv_count,snap_count,vg_size,vg_free,vg_uuid,vg_profile"
# Comma separated list of columns to sort by when reporting 'pvs' command.
# See 'pvs -o help' for the list of possible fields.
# pvs_sort="pv_name"
# Comma separated list of columns to report for 'pvs' command.
# See 'pvs -o help' for the list of possible fields.
# pvs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free"
# Comma separated list of columns to report for 'pvs' command in verbose mode.
# See 'pvs -o help' for the list of possible fields.
# pvs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,dev_size,pv_uuid"
# Comma separated list of columns to sort by when reporting 'lvs --segments' command.
# See 'lvs --segments -o help' for the list of possible fields.
# segs_sort="vg_name,lv_name,seg_start"
# Comma separated list of columns to report for 'lvs --segments' command.
# See 'lvs --segments -o help' for the list of possible fields.
# segs_cols="lv_name,vg_name,lv_attr,stripes,segtype,seg_size"
# Comma separated list of columns to report for 'lvs --segments' command in verbose mode.
# See 'lvs --segments -o help' for the list of possible fields.
# segs_cols_verbose="lv_name,vg_name,lv_attr,seg_start,seg_size,stripes,segtype,stripesize,chunksize"
# Comma separated list of columns to sort by when reporting 'pvs --segments' command.
# See 'pvs --segments -o help' for the list of possible fields.
# pvsegs_sort="pv_name,pvseg_start"
# Comma separated list of columns to report for 'pvs --segments' command.
# See 'pvs --segments -o help' for the list of possible fields.
# pvsegs_cols="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size"
# Comma separated list of columns to report for 'pvs --segments' command in verbose mode.
# See 'pvs --segments -o help' for the list of possible fields.
# pvsegs_cols_verbose="pv_name,vg_name,pv_fmt,pv_attr,pv_size,pv_free,pvseg_start,pvseg_size,lv_name,seg_start_pe,segtype,seg_pe_ranges"
#}
####################
# Advanced section #
@@ -812,11 +1174,23 @@ activation {
#
# metadata {
# Default number of copies of metadata to hold on each PV. 0, 1 or 2.
# It's best to leave this at 2.
# You might want to override it from the command line with 0 or 1
# You might want to override it from the command line with 0
# when running pvcreate on new PVs which are to be added to large VGs.
# pvmetadatacopies = 2
# pvmetadatacopies = 1
# Default number of copies of metadata to maintain for each VG.
# If set to a non-zero value, LVM automatically chooses which of
# the available metadata areas to use to achieve the requested
# number of copies of the VG metadata. If you set a value larger
# than the total number of metadata areas available then
# metadata is stored in them all.
# The default value of 0 ("unmanaged") disables this automatic
# management and allows you to control which metadata areas
# are used at the individual PV level using 'pvchange
# --metadataignore y/n'.
# vgmetadatacopies = 0
# Approximate default size of on-disk metadata areas in sectors.
# You should increase this if you have large volume groups or