From 3ce76e7dce877058730b160ced8f99cf8c06471fed097113ed711d02a75a2f82 Mon Sep 17 00:00:00 2001
From: Johannes Thumshirn
Date: Fri, 12 May 2017 07:14:02 +0000
Subject: [PATCH] Accepting request 494787 from home:morbidrsa

- Add possibility to change queue depth at connect time (bsc#1037297)
  + 0001-fabrics-add-option-to-override-drivers-queue-depth.patch
  + 0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch

OBS-URL: https://build.opensuse.org/request/show/494787
OBS-URL: https://build.opensuse.org/package/show/Base:System/nvme-cli?expand=0&rev=24
---
 ...tion-to-override-drivers-queue-depth.patch | 85 +++++++++++++++++++
 ...on-to-override-drivers-queue-depth-a.patch | 53 ++++++++++++
 nvme-cli.changes                              |  7 ++
 nvme-cli.spec                                 |  4 +
 4 files changed, 149 insertions(+)
 create mode 100644 0001-fabrics-add-option-to-override-drivers-queue-depth.patch
 create mode 100644 0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch

diff --git a/0001-fabrics-add-option-to-override-drivers-queue-depth.patch b/0001-fabrics-add-option-to-override-drivers-queue-depth.patch
new file mode 100644
index 0000000..c1182a8
--- /dev/null
+++ b/0001-fabrics-add-option-to-override-drivers-queue-depth.patch
@@ -0,0 +1,85 @@
+From c5e4e6fce969ecd5e02ad6668bcbd1f950aebae6 Mon Sep 17 00:00:00 2001
+From: Johannes Thumshirn
+Date: Wed, 10 May 2017 11:04:12 +0200
+Subject: fabrics: add option to override drivers queue depth
+Git-commit: c5e4e6fce969ecd5e02ad6668bcbd1f950aebae6
+Patch-mainline: Staged for v1.3
+References: bsc#1037297
+
+Currently it is not possible to override the fabrics drivers default queue
+depth with the nvme userspace utility, but only when manually writing the
+parameters to the /dev/nvme-fabrics character device.
+
+Add an option to override the drivers default queue depth for NVMe over
+fabrics.
+
+Signed-off-by: Johannes Thumshirn
+Signed-off-by: Keith Busch
+---
+ Documentation/nvme-connect.txt |  6 ++++++
+ fabrics.c                      | 11 +++++++++++
+ 2 files changed, 17 insertions(+)
+
+diff --git a/Documentation/nvme-connect.txt b/Documentation/nvme-connect.txt
+index 38fae39..a746a3a 100644
+--- a/Documentation/nvme-connect.txt
++++ b/Documentation/nvme-connect.txt
+@@ -16,6 +16,7 @@ SYNOPSIS
+ 	[--host-traddr= | -w ]
+ 	[--hostnqn= | -q ]
+ 	[--nr-io-queues=<#> | -i <#>]
++	[--queue-size=<#> | -Q <#>]
+ 	[--keep-alive-tmo=<#> | -k <#>]
+ 	[--reconnect-delay=<#> | -c <#>]
+ 
+@@ -73,6 +74,11 @@ OPTIONS
+ --nr-io-queues=<#>::
+ 	Overrides the default number of I/O queues create by the driver.
+ 
++-Q <#>::
++--queue-size=<#>::
++	Overrides the default number of elements in the I/O queues created
++	by the driver.
++
+ -k <#>::
+ --keep-alive-tmo=<#>::
+ 	Overrides the default keep alive timeout (in seconds).
+diff --git a/fabrics.c b/fabrics.c
+index 6648bae..a826ecc 100644
+--- a/fabrics.c
++++ b/fabrics.c
+@@ -52,6 +52,7 @@ static struct config {
+ 	char *host_traddr;
+ 	char *hostnqn;
+ 	char *nr_io_queues;
++	char *queue_size;
+ 	char *keep_alive_tmo;
+ 	char *reconnect_delay;
+ 	char *raw;
+@@ -524,6 +525,15 @@ static int build_options(char *argstr, int max_len)
+ 		max_len -= len;
+ 	}
+ 
++	if (cfg.queue_size) {
++		len = snprintf(argstr, max_len, ",queue_size=%s",
++				cfg.queue_size);
++		if (len < 0)
++			return -EINVAL;
++		argstr += len;
++		max_len -= len;
++	}
++
+ 	if (cfg.keep_alive_tmo) {
+ 		len = snprintf(argstr, max_len, ",keep_alive_tmo=%s", cfg.keep_alive_tmo);
+ 		if (len < 0)
+@@ -805,6 +815,7 @@ int connect(const char *desc, int argc, char **argv)
+ 	{"host-traddr", 'w', "LIST", CFG_STRING, &cfg.host_traddr, required_argument, "host traddr (e.g. FC WWN's)" },
+ 	{"hostnqn", 'q', "LIST", CFG_STRING, &cfg.hostnqn, required_argument, "user-defined hostnqn" },
+ 	{"nr-io-queues", 'i', "LIST", CFG_STRING, &cfg.nr_io_queues, required_argument, "number of io queues to use (default is core count)" },
++	{"queue-size", 'Q', "LIST", CFG_STRING, &cfg.queue_size, required_argument, "number of io queue elements to use (default 128)" },
+ 	{"keep-alive-tmo", 'k', "LIST", CFG_STRING, &cfg.keep_alive_tmo, required_argument, "keep alive timeout period in seconds" },
+ 	{"reconnect-delay", 'c', "LIST", CFG_STRING, &cfg.reconnect_delay, required_argument, "reconnect timeout period in seconds" },
+ 	{NULL},
+-- 
+2.12.0
+
diff --git a/0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch b/0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch
new file mode 100644
index 0000000..853121c
--- /dev/null
+++ b/0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch
@@ -0,0 +1,53 @@
+From 1d42c362af841aa3a67f9fd2be9c305df87dbbdb Mon Sep 17 00:00:00 2001
+From: Johannes Thumshirn
+Date: Thu, 11 May 2017 19:44:30 +0200
+Subject: fabrics: add option to override drivers queue depth also for
+ connect-all command
+Patch-mainline: Submitted, http://lists.infradead.org/pipermail/linux-nvme/2017-May/010144.html
+References: bsc#1037297
+
+Currently it is not possible to override the fabrics drivers default
+queue depth with the nvme userspace utility's 'connect-all' command,
+but only when using the 'connect' command.
+
+Add an option to override the drivers default queue depth for NVMe
+over fabrics with the 'connect-all' command as we did previously with the
+'connect' command.
+
+Signed-off-by: Johannes Thumshirn
+---
+ Documentation/nvme-connect-all.txt | 5 +++++
+ fabrics.c                          | 1 +
+ 2 files changed, 6 insertions(+)
+
+diff --git a/Documentation/nvme-connect-all.txt b/Documentation/nvme-connect-all.txt
+index b4b3e40..03adac5 100644
+--- a/Documentation/nvme-connect-all.txt
++++ b/Documentation/nvme-connect-all.txt
+@@ -81,6 +81,11 @@ OPTIONS
+ 	and dump it to a raw binary file. By default 'nvme connect-all' will
+ 	dump the output to stdout.
+ 
++-Q <#>::
++--queue-size=<#>::
++	Overrides the default number of elements in the I/O queues created
++	by the driver.
++
+ EXAMPLES
+ --------
+ * Connect to all records returned by the Discover Controller with IP4 address
+diff --git a/fabrics.c b/fabrics.c
+index a826ecc..bbcca47 100644
+--- a/fabrics.c
++++ b/fabrics.c
+@@ -782,6 +782,7 @@ int discover(const char *desc, int argc, char **argv, bool connect)
+ 	{"trsvcid", 's', "LIST", CFG_STRING, &cfg.trsvcid, required_argument, "transport service id (e.g. IP port)" },
+ 	{"host-traddr", 'w', "LIST", CFG_STRING, &cfg.host_traddr, required_argument, "host traddr (e.g. FC WWN's)" },
+ 	{"hostnqn", 'q', "LIST", CFG_STRING, &cfg.hostnqn, required_argument, "user-defined hostnqn (if default not used)" },
++	{"queue-size", 'Q', "LIST", CFG_STRING, &cfg.queue_size, required_argument, "number of io queue elements to use (default 128)" },
+ 	{"raw", 'r', "LIST", CFG_STRING, &cfg.raw, required_argument, "raw output file" },
+ 	{NULL},
+ 	};
+-- 
+2.12.0
+
diff --git a/nvme-cli.changes b/nvme-cli.changes
index 8902bbc..3303105 100644
--- a/nvme-cli.changes
+++ b/nvme-cli.changes
@@ -1,3 +1,10 @@
+-------------------------------------------------------------------
+Fri May 12 07:10:37 UTC 2017 - jthumshirn@suse.com
+
+- Add possibility to change queue depth at connect time (bsc#1037297)
+  + 0001-fabrics-add-option-to-override-drivers-queue-depth.patch
+  + 0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch
+
 -------------------------------------------------------------------
 Mon Apr 24 07:33:25 UTC 2017 - jthumshirn@suse.com
 
diff --git a/nvme-cli.spec b/nvme-cli.spec
index 14e8726..75db950 100644
--- a/nvme-cli.spec
+++ b/nvme-cli.spec
@@ -26,6 +26,8 @@ Url: https://github.com/linux-nvme/nvme-cli
 Source: %{name}-v%{version}.tar.gz
 BuildRequires: pkgconfig(libudev)
 BuildRoot: %{_tmppath}/%{name}-%{version}-build
+Patch0: 0001-fabrics-add-option-to-override-drivers-queue-depth.patch
+Patch1: 0002-fabrics-add-option-to-override-drivers-queue-depth-a.patch
 
 %description
 NVMe is a fast, scalable, direct attached storage interface. The nvme
@@ -33,6 +35,8 @@ cli rpm installs core management tools with minimal dependencies.
 
 %prep
 %setup -q -n %{name}-v%{version}
+%patch0 -p1
+%patch1 -p1
 
 %build
 echo %{version} > version
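
Usage sketch (not part of the patch): with both patches applied, the new -Q/--queue-size flag can be passed to 'nvme connect' and, after the second patch, to 'nvme connect-all'; the transport, address, NQN and queue-size values below are placeholders, not taken from the patch.

    # Override the driver's default I/O queue depth for a single fabrics connection
    nvme connect --transport=rdma --traddr=192.168.1.10 --trsvcid=4420 \
                 --nqn=nqn.2017-05.example:subsys1 --queue-size=64

    # Apply the same override when connecting to all discovered controllers
    nvme connect-all --transport=rdma --traddr=192.168.1.10 --trsvcid=4420 --queue-size=64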