python-botocore/remove_nose.patch
Robert Schweikert 1514cb16bb Accepting request 860210 from home:bnavigator:branches:devel:languages:python:aws
- Update to 1.19.47
  * api-change:servicecatalog: Update servicecatalog client to 
    latest version
- Changes in 1.19.46
  * api-change:macie2: Update macie2 client to latest version
  * api-change:elasticache: Update elasticache client to latest 
    version
- Changes in 1.19.45
  * api-change:acm-pca: Update acm-pca client to latest version
  * api-change:apigatewayv2: Update apigatewayv2 client to latest 
    version
- Changes in 1.19.44
  * api-change:cloudfront: Update cloudfront client to latest 
    version
- Changes in 1.19.43
  * api-change:compute-optimizer: Update compute-optimizer client 
    to latest version
  * api-change:resource-groups: Update resource-groups client to 
    latest version
  * api-change:dms: Update dms client to latest version
- Changes in 1.19.42
  * api-change:ssm: Update ssm client to latest version
  * api-change:iotwireless: Update iotwireless client to latest 
    version
  * api-change:rds: Update rds client to latest version
  * api-change:glue: Update glue client to latest version
  * api-change:ce: Update ce client to latest version
  * api-change:connect: Update connect client to latest version
  * api-change:elasticache: Update elasticache client to latest 
    version
- Changes in 1.19.41
  * api-change:config: Update config client to latest version
  * api-change:ec2: Update ec2 client to latest version
  * api-change:glue: Update glue client to latest version
  * api-change:batch: Update batch client to latest version
  * api-change:managedblockchain: Update managedblockchain client 
    to latest version
  * api-change:service-quotas: Update service-quotas client to 
    latest version
  * api-change:s3: Update s3 client to latest version
  * api-change:connectparticipant: Update connectparticipant 
    client to latest version
  * api-change:securityhub: Update securityhub client to latest 
    version
  * api-change:qldb-session: Update qldb-session client to latest 
    version
  * api-change:outposts: Update outposts client to latest version
  * api-change:servicecatalog-appregistry: Update servicecatalog-
    appregistry client to latest version
  * api-change:dms: Update dms client to latest version
  * api-change:apigateway: Update apigateway client to latest 
    version
- Changes in 1.19.40
  * api-change:rds: Update rds client to latest version
  * bugfix:SSO: Fixed timestamp format for SSO credential 
    expirations
  * api-change:personalize-runtime: Update personalize-runtime 
    client to latest version
  * api-change:ec2: Update ec2 client to latest version
- Changes in 1.19.39
  * api-change:ec2: Update ec2 client to latest version
  * api-change:servicecatalog: Update servicecatalog client to 
    latest version
  * api-change:dlm: Update dlm client to latest version
  * api-change:kms: Update kms client to latest version
  * api-change:route53resolver: Update route53resolver client to 
    latest version
  * api-change:sqs: Update sqs client to latest version
  * api-change:config: Update config client to latest version
  * api-change:imagebuilder: Update imagebuilder client to latest 
    version
  * api-change:route53: Update route53 client to latest version
- Changes in 1.19.38
  * api-change:ce: Update ce client to latest version
  * api-change:amp: Update amp client to latest version
  * api-change:location: Update location client to latest version
  * api-change:wellarchitected: Update wellarchitected client to 
    latest version
  * api-change:quicksight: Update quicksight client to latest 
    version
- Changes in 1.19.37
  * api-change:iotwireless: Update iotwireless client to latest 
    version
  * api-change:lambda: Update lambda client to latest version
  * api-change:greengrassv2: Update greengrassv2 client to latest 
    version
  * api-change:ssm: Update ssm client to latest version
  * api-change:iotdeviceadvisor: Update iotdeviceadvisor client to 
    latest version
  * api-change:iot: Update iot client to latest version
  * api-change:iotanalytics: Update iotanalytics client to latest 
    version
  * api-change:amp: Update amp client to latest version
  * api-change:iotfleethub: Update iotfleethub client to latest 
    version
- Changes in 1.19.36
  * api-change:ec2: Update ec2 client to latest version
  * api-change:globalaccelerator: Update globalaccelerator client 
    to latest version
  * api-change:devops-guru: Update devops-guru client to latest 
    version
- Changes in 1.19.35
  * api-change:guardduty: Update guardduty client to latest 
    version
  * api-change:iotsitewise: Update iotsitewise client to latest 
    version
  * api-change:autoscaling: Update autoscaling client to latest 
    version
  * api-change:cloudwatch: Update cloudwatch client to latest 
    version
  * api-change:pi: Update pi client to latest version
  * api-change:cloudtrail: Update cloudtrail client to latest 
    version
- Changes in 1.19.34
  * api-change:networkmanager: Update networkmanager client to 
    latest version
  * api-change:kendra: Update kendra client to latest version
  * api-change:ec2: Update ec2 client to latest version
- Changes in 1.19.33
  * api-change:globalaccelerator: Update globalaccelerator client 
    to latest version
  * api-change:ec2: Update ec2 client to latest version
  * api-change:redshift: Update redshift client to latest version
- Changes in 1.19.32
  * api-change:ecr: Update ecr client to latest version
  * api-change:sagemaker: Update sagemaker client to latest 
    version
  * api-change:kendra: Update kendra client to latest version
  * api-change:quicksight: Update quicksight client to latest 
    version
  * api-change:auditmanager: Update auditmanager client to latest 
    version
  * api-change:sagemaker-runtime: Update sagemaker-runtime client 
    to latest version
  * api-change:sagemaker-edge: Update sagemaker-edge client to 
    latest version
  * api-change:forecast: Update forecast client to latest version
  * api-change:healthlake: Update healthlake client to latest 
    version
  * api-change:emr-containers: Update emr-containers client to 
    latest version
- Changes in 1.19.31
  * api-change:dms: Update dms client to latest version
  * api-change:servicecatalog-appregistry: Update servicecatalog-
    appregistry client to latest version
- Changes in 1.19.30
  * api-change:ssm: Update ssm client to latest version
  * api-change:ec2: Update ec2 client to latest version
  * api-change:workspaces: Update workspaces client to latest 
    version
  * api-change:license-manager: Update license-manager client to 
    latest version
  * api-change:lambda: Update lambda client to latest version
  * api-change:ds: Update ds client to latest version
  * api-change:kafka: Update kafka client to latest version
  * api-change:medialive: Update medialive client to latest 
    version
  * api-change:rds: Update rds client to latest version
- Changes in 1.19.29
  * api-change:license-manager: Update license-manager client to 
    latest version
  * api-change:compute-optimizer: Update compute-optimizer client 
    to latest version
  * api-change:amplifybackend: Update amplifybackend client to 
    latest version
  * api-change:batch: Update batch client to latest version
- Changes in 1.19.28
  * api-change:customer-profiles: Update customer-profiles client 
    to latest version
- Changes in 1.19.27
  * api-change:sagemaker-featurestore-runtime: Update sagemaker-
    featurestore-runtime client to latest version
  * api-change:ecr-public: Update ecr-public client to latest 
    version
  * api-change:honeycode: Update honeycode client to latest 
    version
  * api-change:eks: Update eks client to latest version
  * api-change:amplifybackend: Update amplifybackend client to 
    latest version
  * api-change:lambda: Update lambda client to latest version
  * api-change:sagemaker: Update sagemaker client to latest 
    version
  * api-change:lookoutvision: Update lookoutvision client to 
    latest version
  * api-change:ec2: Update ec2 client to latest version
  * api-change:connect: Update connect client to latest version
  * api-change:connect-contact-lens: Update connect-contact-lens 
    client to latest version
  * api-change:profile: Update profile client to latest version
  * api-change:s3: Update s3 client to latest version
  * api-change:appintegrations: Update appintegrations client to 
    latest version
  * api-change:ds: Update ds client to latest version
  * api-change:devops-guru: Update devops-guru client to latest 
    version
- Changes in 1.19.26
  * api-change:ec2: Update ec2 client to latest version 
- Unpin upper versions
- Refresh remove_nose.patch

OBS-URL: https://build.opensuse.org/request/show/860210
OBS-URL: https://build.opensuse.org/package/show/devel:languages:python:aws/python-botocore?expand=0&rev=91
2021-01-04 17:50:40 +00:00

From 5f7a437ddcc91546eb2e0c0d4fbcdcf9cbae0ad6 Mon Sep 17 00:00:00 2001
From: Matěj Cepl <mcepl@cepl.eu>
Date: Wed, 26 Aug 2020 18:50:12 +0200
Subject: [PATCH 01/14] Remove dependency on nose.

The test suite is now mostly independent of anything outside of the
standard library; the one exception is pytest, which is needed for
marking tests as slow. If those marks can be sacrificed, there would be
no new dependency.
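
For context, a minimal sketch (not part of the patch itself) of how a test
would use the "slow" marker that the setup.cfg hunk below registers; the
test name here is hypothetical:

    import pytest

    @pytest.mark.slow
    def test_full_endpoint_resolution_sweep():
        # A long-running case that should be excluded from quick test runs.
        ...

Such tests can then be deselected by running: pytest -m "not slow".
Supporting this marker is the only reason pytest is added to
requirements.txt.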
---
requirements.txt | 7 +-
setup.cfg | 3 +
tests/__init__.py | 22 +-
tests/acceptance/features/steps/base.py | 3 +-
tests/functional/csm/test_monitoring.py | 6 +-
.../docs/test_shared_example_config.py | 2 +-
tests/functional/test_alias.py | 4 +-
tests/functional/test_client_class_names.py | 21 +-
tests/functional/test_cognito_idp.py | 9 +-
tests/functional/test_credentials.py | 4 +-
tests/functional/test_endpoints.py | 15 +-
tests/functional/test_event_alias.py | 4 +-
tests/functional/test_h2_required.py | 4 +-
tests/functional/test_history.py | 4 +-
tests/functional/test_model_backcompat.py | 9 +-
tests/functional/test_model_completeness.py | 5 +-
tests/functional/test_paginate.py | 44 +-
tests/functional/test_paginator_config.py | 7 +-
tests/functional/test_public_apis.py | 2 +-
tests/functional/test_regions.py | 105 ++--
tests/functional/test_response_shadowing.py | 9 +-
tests/functional/test_retry.py | 3 +-
tests/functional/test_s3.py | 481 +++++++++---------
tests/functional/test_service_alias.py | 2 +-
tests/functional/test_service_names.py | 21 +-
tests/functional/test_six_imports.py | 2 +-
tests/functional/test_stub.py | 12 +-
tests/functional/test_waiter_config.py | 4 +-
tests/integration/test_client.py | 4 +-
tests/integration/test_ec2.py | 2 -
tests/integration/test_emr.py | 4 +-
tests/integration/test_s3.py | 17 +-
tests/integration/test_smoke.py | 12 +-
tests/integration/test_sts.py | 4 +-
tests/integration/test_waiters.py | 4 +-
tests/unit/auth/test_sigv4.py | 20 +-
tests/unit/docs/test_utils.py | 2 +-
tests/unit/response_parsing/README.rst | 12 +-
.../response_parsing/test_response_parsing.py | 8 +-
tests/unit/retries/test_special.py | 2 -
tests/unit/retries/test_standard.py | 34 +-
tests/unit/test_awsrequest.py | 9 +-
tests/unit/test_client.py | 18 +-
tests/unit/test_compat.py | 136 +++--
tests/unit/test_config_provider.py | 11 +-
tests/unit/test_credentials.py | 20 +-
tests/unit/test_discovery.py | 5 +-
tests/unit/test_endpoint.py | 5 +-
tests/unit/test_errorfactory.py | 3 +-
tests/unit/test_eventstream.py | 157 +++---
tests/unit/test_exceptions.py | 6 +-
tests/unit/test_handlers.py | 2 +-
.../test_http_client_exception_mapping.py | 20 +-
tests/unit/test_http_session.py | 18 +-
tests/unit/test_loaders.py | 9 +-
tests/unit/test_model.py | 12 +-
tests/unit/test_paginate.py | 2 +-
tests/unit/test_parsers.py | 19 +-
tests/unit/test_protocols.py | 30 +-
tests/unit/test_s3_addressing.py | 8 +-
tests/unit/test_utils.py | 2 +-
tests/unit/test_waiters.py | 2 +-
62 files changed, 705 insertions(+), 728 deletions(-)
diff --git a/requirements.txt b/requirements.txt
index d5296a83..111c2e64 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,9 @@
tox>=2.5.0,<3.0.0
-nose==1.3.7
-mock==1.3.0
+pytest>=4.6
+pluggy>=0.7
+py>=1.5.0
+pytest-cov
+mock==1.3.0; python_version < '3.3'
wheel==0.24.0
behave==1.2.5
jsonschema==2.5.1
diff --git a/setup.cfg b/setup.cfg
index ca187c21..64e23f38 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -11,4 +11,7 @@ requires-dist =
[egg_info]
tag_build =
tag_date = 0
+[tool:pytest]
+markers = slow: marks tests as slow
+
diff --git a/tests/__init__.py b/tests/__init__.py
index bf862556..046bdb2b 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -29,8 +29,6 @@ from subprocess import Popen, PIPE
from dateutil.tz import tzlocal
import unittest
-from nose.tools import assert_equal
-
import botocore.loaders
import botocore.session
from botocore.awsrequest import AWSResponse
@@ -346,16 +344,16 @@ def assert_url_equal(url1, url2):
# Because the query string ordering isn't relevant, we have to parse
# every single part manually and then handle the query string.
- assert_equal(parts1.scheme, parts2.scheme)
- assert_equal(parts1.netloc, parts2.netloc)
- assert_equal(parts1.path, parts2.path)
- assert_equal(parts1.params, parts2.params)
- assert_equal(parts1.fragment, parts2.fragment)
- assert_equal(parts1.username, parts2.username)
- assert_equal(parts1.password, parts2.password)
- assert_equal(parts1.hostname, parts2.hostname)
- assert_equal(parts1.port, parts2.port)
- assert_equal(parse_qs(parts1.query), parse_qs(parts2.query))
+ assert parts1.scheme == parts2.scheme
+ assert parts1.netloc == parts2.netloc
+ assert parts1.path == parts2.path
+ assert parts1.params == parts2.params
+ assert parts1.fragment == parts2.fragment
+ assert parts1.username == parts2.username
+ assert parts1.password == parts2.password
+ assert parts1.hostname == parts2.hostname
+ assert parts1.port == parts2.port
+ assert parse_qs(parts1.query) == parse_qs(parts2.query)
class HTTPStubberException(Exception):
diff --git a/tests/acceptance/features/steps/base.py b/tests/acceptance/features/steps/base.py
index 918d1fa7..35a9c8c7 100644
--- a/tests/acceptance/features/steps/base.py
+++ b/tests/acceptance/features/steps/base.py
@@ -4,7 +4,6 @@ from botocore import xform_name
from botocore.exceptions import ClientError
from behave import when, then
-from nose.tools import assert_equal
def _params_from_table(table):
@@ -72,7 +71,7 @@ def api_call_with_json_and_error(context, operation):
@then(u'I expect the response error code to be "{}"')
def then_expected_error(context, code):
- assert_equal(context.error_response.response['Error']['Code'], code)
+ assert context.error_response.response['Error']['Code'] == code
@then(u'the value at "{}" should be a list')
diff --git a/tests/functional/csm/test_monitoring.py b/tests/functional/csm/test_monitoring.py
index 697f8085..b94dc996 100644
--- a/tests/functional/csm/test_monitoring.py
+++ b/tests/functional/csm/test_monitoring.py
@@ -19,7 +19,6 @@ import socket
import threading
import mock
-from nose.tools import assert_equal
from tests import temporary_file
from tests import ClientHTTPStubber
@@ -50,7 +49,7 @@ EXPECTED_EXCEPTIONS_THROWN = (
def test_client_monitoring():
test_cases = _load_test_cases()
for case in test_cases:
- yield _run_test_case, case
+ _run_test_case(case)
def _load_test_cases():
@@ -121,8 +120,7 @@ def _run_test_case(case):
case['configuration'], listener.port) as session:
for api_call in case['apiCalls']:
_make_api_call(session, api_call)
- assert_equal(
- listener.received_events, case['expectedMonitoringEvents'])
+ assert listener.received_events == case['expectedMonitoringEvents']
def _make_api_call(session, api_call):
diff --git a/tests/functional/docs/test_shared_example_config.py b/tests/functional/docs/test_shared_example_config.py
index fdd21cba..c876957c 100644
--- a/tests/functional/docs/test_shared_example_config.py
+++ b/tests/functional/docs/test_shared_example_config.py
@@ -27,7 +27,7 @@ def test_lint_shared_example_configs():
examples = example_config.get("examples", {})
for operation, operation_examples in examples.items():
for example in operation_examples:
- yield _lint_single_example, operation, example, service_model
+ _lint_single_example(operation, example, service_model)
def _lint_single_example(operation_name, example_config, service_model):
diff --git a/tests/functional/test_alias.py b/tests/functional/test_alias.py
index f9bc5403..c80e1c78 100644
--- a/tests/functional/test_alias.py
+++ b/tests/functional/test_alias.py
@@ -49,13 +49,13 @@ ALIAS_CASES = [
def test_can_use_alias():
session = botocore.session.get_session()
for case in ALIAS_CASES:
- yield _can_use_parameter_in_client_call, session, case
+ _can_use_parameter_in_client_call(session, case)
def test_can_use_original_name():
session = botocore.session.get_session()
for case in ALIAS_CASES:
- yield _can_use_parameter_in_client_call, session, case, False
+ _can_use_parameter_in_client_call(session, case, False)
def _can_use_parameter_in_client_call(session, case, use_alias=True):
diff --git a/tests/functional/test_client_class_names.py b/tests/functional/test_client_class_names.py
index 6f6a806e..a52ce380 100644
--- a/tests/functional/test_client_class_names.py
+++ b/tests/functional/test_client_class_names.py
@@ -10,11 +10,9 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from nose.tools import assert_equal
-
+from tests import unittest
import botocore.session
-
REGION = 'us-east-1'
SERVICE_TO_CLASS_NAME = {
@@ -69,13 +67,10 @@ SERVICE_TO_CLASS_NAME = {
}
-def test_client_has_correct_class_name():
- session = botocore.session.get_session()
- for service_name in SERVICE_TO_CLASS_NAME:
- client = session.create_client(service_name, REGION)
- yield (_assert_class_name_matches_ref_class_name, client,
- SERVICE_TO_CLASS_NAME[service_name])
-
-
-def _assert_class_name_matches_ref_class_name(client, ref_class_name):
- assert_equal(client.__class__.__name__, ref_class_name)
+class TestClientClassNames(unittest.TestCase):
+ def test_client_has_correct_class_name(self):
+ session = botocore.session.get_session()
+ for service_name in SERVICE_TO_CLASS_NAME:
+ client = session.create_client(service_name, REGION)
+ self.assertEqual(client.__class__.__name__,
+ SERVICE_TO_CLASS_NAME[service_name])
diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py
index 81f49321..7f919d3a 100644
--- a/tests/functional/test_cognito_idp.py
+++ b/tests/functional/test_cognito_idp.py
@@ -12,8 +12,6 @@
# language governing permissions and limitations under the License.
import mock
-from nose.tools import assert_false
-
from tests import create_session, ClientHTTPStubber
@@ -95,8 +93,7 @@ def test_unsigned_operations():
client = session.create_client('cognito-idp', 'us-west-2')
for operation, params in operation_params.items():
- test_case = UnsignedOperationTestCase(client, operation, params)
- yield test_case.run
+ UnsignedOperationTestCase(client, operation, params).run()
class UnsignedOperationTestCase(object):
@@ -114,7 +111,5 @@ class UnsignedOperationTestCase(object):
operation(**self._parameters)
request = self._http_stubber.requests[0]
- assert_false(
- 'authorization' in request.headers,
+ assert 'authorization' not in request.headers, \
'authorization header found in unsigned operation'
- )
diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py
index 18db4154..a8d7bacc 100644
--- a/tests/functional/test_credentials.py
+++ b/tests/functional/test_credentials.py
@@ -41,7 +41,7 @@ from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
from botocore.stub import Stubber
from botocore.utils import datetime2timestamp
-
+from botocore.compat import six
class TestCredentialRefreshRaces(unittest.TestCase):
def assert_consistent_credentials_seen(self, creds, func):
@@ -826,7 +826,7 @@ class TestProcessProvider(unittest.TestCase):
# Finally `(?s)` at the beginning makes dots match newlines so
# we can handle a multi-line string.
reg = r"(?s)^((?!b').)*$"
- with self.assertRaisesRegexp(CredentialRetrievalError, reg):
+ with six.assertRaisesRegex(self, CredentialRetrievalError, reg):
session.get_credentials()
diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py
index 4b60d66e..113ec54f 100644
--- a/tests/functional/test_endpoints.py
+++ b/tests/functional/test_endpoints.py
@@ -10,7 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from nose.tools import assert_equal
from botocore.session import get_session
@@ -138,9 +137,7 @@ def test_endpoint_matches_service():
# prefix.
endpoint_prefix = ENDPOINT_PREFIX_OVERRIDE.get(endpoint_prefix,
endpoint_prefix)
- yield (_assert_known_endpoint_prefix,
- endpoint_prefix,
- known_endpoint_prefixes)
+ _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes)
def _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes):
@@ -160,7 +157,7 @@ def test_service_name_matches_endpoint_prefix():
services = loader.list_available_services('service-2')
for service in services:
- yield _assert_service_name_matches_endpoint_prefix, session, service
+ _assert_service_name_matches_endpoint_prefix(session, service)
def _assert_service_name_matches_endpoint_prefix(session, service_name):
@@ -170,8 +167,6 @@ def _assert_service_name_matches_endpoint_prefix(session, service_name):
# Handle known exceptions where we have renamed the service directory
# for one reason or another.
actual_service_name = SERVICE_RENAMES.get(service_name, service_name)
- assert_equal(
- computed_name, actual_service_name,
- "Actual service name `%s` does not match expected service name "
- "we computed: `%s`" % (
- actual_service_name, computed_name))
+ assert computed_name == actual_service_name, \
+ ("Actual service name `%s` does not match expected service name " +
+ "we computed: `%s`") % (actual_service_name, computed_name)
diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py
index 604f0128..b821f4d9 100644
--- a/tests/functional/test_event_alias.py
+++ b/tests/functional/test_event_alias.py
@@ -584,8 +584,8 @@ def test_event_alias():
service_id = SERVICES[client_name]['service_id']
if endpoint_prefix is not None:
yield _assert_handler_called, client_name, endpoint_prefix
- yield _assert_handler_called, client_name, service_id
- yield _assert_handler_called, client_name, client_name
+ _assert_handler_called(client_name, service_id)
+ _assert_handler_called(client_name, client_name)
def _assert_handler_called(client_name, event_part):
diff --git a/tests/functional/test_h2_required.py b/tests/functional/test_h2_required.py
index 3e7ba1c2..661a5613 100644
--- a/tests/functional/test_h2_required.py
+++ b/tests/functional/test_h2_required.py
@@ -29,12 +29,12 @@ def test_all_uses_of_h2_are_known():
service_model = session.get_service_model(service)
h2_config = service_model.metadata.get('protocolSettings', {}).get('h2')
if h2_config == 'required':
- yield _assert_h2_service_is_known, service
+ _assert_h2_service_is_known(service)
elif h2_config == 'eventstream':
for operation in service_model.operation_names:
operation_model = service_model.operation_model(operation)
if operation_model.has_event_stream_output:
- yield _assert_h2_operation_is_known, service, operation
+ _assert_h2_operation_is_known(service, operation)
def _assert_h2_service_is_known(service):
diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py
index 1968dc83..d3d46f4a 100644
--- a/tests/functional/test_history.py
+++ b/tests/functional/test_history.py
@@ -87,10 +87,10 @@ class TestRecordStatementsInjections(BaseSessionTest):
self.assertIsNone(body)
streaming = payload['streaming']
- self.assertEquals(streaming, False)
+ self.assertEqual(streaming, False)
url = payload['url']
- self.assertEquals(url, 'https://s3.us-west-2.amazonaws.com/')
+ self.assertEqual(url, 'https://s3.us-west-2.amazonaws.com/')
self.assertEqual(source, 'BOTOCORE')
diff --git a/tests/functional/test_model_backcompat.py b/tests/functional/test_model_backcompat.py
index 9586b5f1..dac77552 100644
--- a/tests/functional/test_model_backcompat.py
+++ b/tests/functional/test_model_backcompat.py
@@ -12,7 +12,6 @@
# language governing permissions and limitations under the License.
import os
-from nose.tools import assert_equal
from botocore.session import Session
from tests import ClientHTTPStubber
from tests.functional import TEST_MODELS_DIR
@@ -56,8 +55,7 @@ def test_old_model_continues_to_work():
'Content-Type': 'application/x-amz-json-1.1'},
body=b'{"CertificateSummaryList":[]}')
response = client.list_certificates()
- assert_equal(
- response,
+ assert response == \
{'CertificateSummaryList': [],
'ResponseMetadata': {
'HTTPHeaders': {
@@ -69,8 +67,7 @@ def test_old_model_continues_to_work():
'RequestId': 'abcd',
'RetryAttempts': 0}
}
- )
# Also verify we can use the paginators as well.
- assert_equal(client.can_paginate('list_certificates'), True)
- assert_equal(client.waiter_names, ['certificate_validated'])
+ assert client.can_paginate('list_certificates')
+ assert client.waiter_names == ['certificate_validated']
diff --git a/tests/functional/test_model_completeness.py b/tests/functional/test_model_completeness.py
index 78dd1529..484ee23a 100644
--- a/tests/functional/test_model_completeness.py
+++ b/tests/functional/test_model_completeness.py
@@ -38,5 +38,6 @@ def test_paginators_and_waiters_are_not_lost_in_new_version():
versions = Loader().list_api_versions(service_name, 'service-2')
if len(versions) > 1:
for type_name in ['paginators-1', 'waiters-2']:
- yield (_test_model_is_not_lost, service_name,
- type_name, versions[-2], versions[-1])
+ _test_model_is_not_lost(service_name,
+ type_name,
+ versions[-2], versions[-1])
diff --git a/tests/functional/test_paginate.py b/tests/functional/test_paginate.py
index afbf3816..974f4839 100644
--- a/tests/functional/test_paginate.py
+++ b/tests/functional/test_paginate.py
@@ -14,9 +14,7 @@ from __future__ import division
from math import ceil
from datetime import datetime
-from nose.tools import assert_equal
-
-from tests import random_chars
+from tests import random_chars, unittest
from tests import BaseSessionTest
from botocore.stub import Stubber, StubAssertionError
from botocore.paginate import TokenDecoder, TokenEncoder
@@ -79,7 +77,7 @@ class TestAutoscalingPagination(BaseSessionTest):
self.stubber.activate()
def _setup_scaling_pagination(self, page_size=200, max_items=100,
- total_items=600):
+ total_items=600):
"""
Add to the stubber to test paginating describe_scaling_activities.
@@ -217,22 +215,22 @@ class TestCloudwatchLogsPagination(BaseSessionTest):
self.assertEqual(len(result['events']), 1)
-def test_token_encoding():
- cases = [
- {'foo': 'bar'},
- {'foo': b'bar'},
- {'foo': {'bar': b'baz'}},
- {'foo': ['bar', b'baz']},
- {'foo': b'\xff'},
- {'foo': {'bar': b'baz', 'bin': [b'bam']}},
- ]
-
- for token_dict in cases:
- yield assert_token_encodes_and_decodes, token_dict
-
-
-def assert_token_encodes_and_decodes(token_dict):
- encoded = TokenEncoder().encode(token_dict)
- assert isinstance(encoded, six.string_types)
- decoded = TokenDecoder().decode(encoded)
- assert_equal(decoded, token_dict)
+class TestTokenEncoding(unittest.TestCase):
+ def test_token_encoding(self):
+ cases = [
+ {'foo': 'bar'},
+ {'foo': b'bar'},
+ {'foo': {'bar': b'baz'}},
+ {'foo': ['bar', b'baz']},
+ {'foo': b'\xff'},
+ {'foo': {'bar': b'baz', 'bin': [b'bam']}},
+ ]
+
+ for token_dict in cases:
+ self.assert_token_encodes_and_decodes(token_dict)
+
+ def assert_token_encodes_and_decodes(self, token_dict):
+ encoded = TokenEncoder().encode(token_dict)
+ assert isinstance(encoded, six.string_types)
+ decoded = TokenDecoder().decode(encoded)
+ self.assertEqual(decoded, token_dict)
diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py
index 1c6ef44d..b2c3cbfa 100644
--- a/tests/functional/test_paginator_config.py
+++ b/tests/functional/test_paginator_config.py
@@ -140,12 +140,7 @@ def test_lint_pagination_configs():
'paginators-1',
service_model.api_version)
for op_name, single_config in page_config['pagination'].items():
- yield (
- _lint_single_paginator,
- op_name,
- single_config,
- service_model
- )
+ _lint_single_paginator(op_name, single_config, service_model)
def _lint_single_paginator(operation_name, page_config,
diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py
index 0207e9bf..7e7cbf33 100644
--- a/tests/functional/test_public_apis.py
+++ b/tests/functional/test_public_apis.py
@@ -73,4 +73,4 @@ def test_public_apis_will_not_be_signed():
for operation_name in PUBLIC_API_TESTS[service_name]:
kwargs = PUBLIC_API_TESTS[service_name][operation_name]
method = getattr(client, xform_name(operation_name))
- yield _test_public_apis_will_not_be_signed, client, method, kwargs
+ _test_public_apis_will_not_be_signed(client, method, kwargs)
diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py
index 68ba58ca..42277c2d 100644
--- a/tests/functional/test_regions.py
+++ b/tests/functional/test_regions.py
@@ -10,10 +10,9 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import create_session
+from tests import create_session, unittest
import mock
-from nose.tools import assert_equal, assert_raises
from botocore.client import ClientEndpointBridge
from botocore.exceptions import NoRegionError
@@ -448,64 +447,62 @@ def _get_patched_session():
return session
-def test_known_endpoints():
- # Verify the actual values from the partition files. While
- # TestEndpointHeuristics verified the generic functionality given any
- # endpoints file, this test actually verifies the partition data against a
- # fixed list of known endpoints. This list doesn't need to be kept 100% up
- # to date, but serves as a basis for regressions as the endpoint data
- # logic evolves.
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- for region_name, service_dict in KNOWN_REGIONS.items():
- for service_name, endpoint in service_dict.items():
- yield (_test_single_service_region, service_name,
- region_name, endpoint, resolver)
+class TestRegions(unittest.TestCase):
+ def test_known_endpoints(self):
+ # Verify the actual values from the partition files. While
+ # TestEndpointHeuristics verified the generic functionality given
+ # any endpoints file, this test actually verifies the partition
+ # data against a fixed list of known endpoints. This list doesn't
+ # need to be kept 100% up to date, but serves as a basis for
+ # regressions as the endpoint data logic evolves.
+ resolver = _get_patched_session()._get_internal_component(
+ 'endpoint_resolver')
+ for region_name, service_dict in KNOWN_REGIONS.items():
+ for service_name, endpoint in service_dict.items():
+ self._test_single_service_region(service_name,
+ region_name, endpoint,
+ resolver)
+ def _test_single_service_region(self, service_name, region_name,
+ expected_endpoint, resolver):
+ bridge = ClientEndpointBridge(resolver, None, None)
+ result = bridge.resolve(service_name, region_name)
+ expected = 'https://%s' % expected_endpoint
+ self.assertEqual(result['endpoint_url'], expected)
-def _test_single_service_region(service_name, region_name,
- expected_endpoint, resolver):
- bridge = ClientEndpointBridge(resolver, None, None)
- result = bridge.resolve(service_name, region_name)
- expected = 'https://%s' % expected_endpoint
- assert_equal(result['endpoint_url'], expected)
+ # Ensure that all S3 regions use s3v4 instead of v4
+ def test_all_s3_endpoints_have_s3v4(self):
+ session = _get_patched_session()
+ partitions = session.get_available_partitions()
+ resolver = session._get_internal_component('endpoint_resolver')
+ for partition_name in partitions:
+ for endpoint in session.get_available_regions('s3', partition_name):
+ resolved = resolver.construct_endpoint('s3', endpoint)
+ assert 's3v4' in resolved['signatureVersions']
+ assert 'v4' not in resolved['signatureVersions']
+ def _test_single_service_partition_endpoint(self, service_name,
+ expected_endpoint,
+ resolver):
+ bridge = ClientEndpointBridge(resolver)
+ result = bridge.resolve(service_name)
+ assert result['endpoint_url'] == expected_endpoint
-# Ensure that all S3 regions use s3v4 instead of v4
-def test_all_s3_endpoints_have_s3v4():
- session = _get_patched_session()
- partitions = session.get_available_partitions()
- resolver = session._get_internal_component('endpoint_resolver')
- for partition_name in partitions:
- for endpoint in session.get_available_regions('s3', partition_name):
- resolved = resolver.construct_endpoint('s3', endpoint)
- assert 's3v4' in resolved['signatureVersions']
- assert 'v4' not in resolved['signatureVersions']
+ def test_known_endpoints_other(self):
+ resolver = _get_patched_session()._get_internal_component(
+ 'endpoint_resolver')
+ for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
+ self._test_single_service_partition_endpoint(service_name,
+ endpoint, resolver)
-
-def test_known_endpoints():
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
- yield (_test_single_service_partition_endpoint, service_name,
- endpoint, resolver)
-
-
-def _test_single_service_partition_endpoint(service_name, expected_endpoint,
- resolver):
- bridge = ClientEndpointBridge(resolver)
- result = bridge.resolve(service_name)
- assert_equal(result['endpoint_url'], expected_endpoint)
-
-
-def test_non_partition_endpoint_requires_region():
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- assert_raises(NoRegionError, resolver.construct_endpoint, 'ec2')
+ def test_non_partition_endpoint_requires_region(self):
+ resolver = _get_patched_session()._get_internal_component(
+ 'endpoint_resolver')
+ with self.assertRaises(NoRegionError):
+ resolver.construct_endpoint('ec2')
class TestEndpointResolution(BaseSessionTest):
-
def setUp(self):
super(TestEndpointResolution, self).setUp()
self.xml_response = (
@@ -526,7 +523,7 @@ class TestEndpointResolution(BaseSessionTest):
client, stubber = self.create_stubbed_client('s3', 'us-east-2')
stubber.add_response()
client.list_buckets()
- self.assertEquals(
+ self.assertEqual(
stubber.requests[0].url,
'https://s3.us-east-2.amazonaws.com/'
)
@@ -537,7 +534,7 @@ class TestEndpointResolution(BaseSessionTest):
client.list_buckets()
# Validate we don't fall back to partition endpoint for
# regionalized services.
- self.assertEquals(
+ self.assertEqual(
stubber.requests[0].url,
'https://s3.not-real.amazonaws.com/'
)
diff --git a/tests/functional/test_response_shadowing.py b/tests/functional/test_response_shadowing.py
index b18b4a21..bd53fd91 100644
--- a/tests/functional/test_response_shadowing.py
+++ b/tests/functional/test_response_shadowing.py
@@ -11,7 +11,6 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
-from nose.tools import assert_false
def _all_services():
@@ -33,17 +32,17 @@ def _assert_not_shadowed(key, shape):
msg = (
'Found shape "%s" that shadows the botocore response key "%s"'
)
- assert_false(key in shape.members, msg % (shape.name, key))
+ assert key not in shape.members, msg % (shape.name, key)
def test_response_metadata_is_not_shadowed():
for operation_model in _all_operations():
shape = operation_model.output_shape
- yield _assert_not_shadowed, 'ResponseMetadata', shape
+ _assert_not_shadowed('ResponseMetadata', shape)
def test_exceptions_do_not_shadow():
for service_model in _all_services():
for shape in service_model.error_shapes:
- yield _assert_not_shadowed, 'ResponseMetadata', shape
- yield _assert_not_shadowed, 'Error', shape
+ _assert_not_shadowed('ResponseMetadata', shape)
+ _assert_not_shadowed('Error', shape)
diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py
index 51e200cd..cb2e7d28 100644
--- a/tests/functional/test_retry.py
+++ b/tests/functional/test_retry.py
@@ -16,6 +16,7 @@ from tests import BaseSessionTest, mock, ClientHTTPStubber
from botocore.exceptions import ClientError
from botocore.config import Config
+from botocore.compat import six
class BaseRetryTest(BaseSessionTest):
@@ -38,7 +39,7 @@ class BaseRetryTest(BaseSessionTest):
with ClientHTTPStubber(client) as http_stubber:
for _ in range(num_responses):
http_stubber.add_response(status=status, body=body)
- with self.assertRaisesRegexp(
+ with six.assertRaisesRegex(self,
ClientError, 'reached max retries: %s' % num_retries):
yield
self.assertEqual(len(http_stubber.requests), num_responses)
diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py
index b7645125..5354a52b 100644
--- a/tests/functional/test_s3.py
+++ b/tests/functional/test_s3.py
@@ -14,7 +14,6 @@ import re
from tests import temporary_file
from tests import unittest, mock, BaseSessionTest, create_session, ClientHTTPStubber
-from nose.tools import assert_equal
import botocore.session
from botocore.config import Config
@@ -447,8 +446,8 @@ class TestS3Copy(BaseS3OperationTest):
)
# Validate we retried and got second body
- self.assertEquals(len(self.http_stubber.requests), 2)
- self.assertEquals(response['ResponseMetadata']['HTTPStatusCode'], 200)
+ self.assertEqual(len(self.http_stubber.requests), 2)
+ self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
self.assertTrue('CopyObjectResult' in response)
def test_s3_copy_object_with_incomplete_response(self):
@@ -1296,48 +1295,49 @@ class TestGeneratePresigned(BaseS3OperationTest):
'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'})
self.assert_is_v2_presigned_url(url)
+
def test_checksums_included_in_expected_operations():
"""Validate expected calls include Content-MD5 header"""
t = S3ChecksumCases(_verify_checksum_in_headers)
- yield t.case('put_bucket_tagging',
- {"Bucket": "foo", "Tagging":{"TagSet":[]}})
- yield t.case('put_bucket_lifecycle',
- {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}})
- yield t.case('put_bucket_lifecycle_configuration',
- {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}})
- yield t.case('put_bucket_cors',
- {"Bucket": "foo", "CORSConfiguration":{"CORSRules": []}})
- yield t.case('delete_objects',
- {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}})
- yield t.case('put_bucket_replication',
- {"Bucket": "foo",
- "ReplicationConfiguration": {"Role":"", "Rules": []}})
- yield t.case('put_bucket_acl',
- {"Bucket": "foo", "AccessControlPolicy":{}})
- yield t.case('put_bucket_logging',
- {"Bucket": "foo",
- "BucketLoggingStatus":{}})
- yield t.case('put_bucket_notification',
- {"Bucket": "foo", "NotificationConfiguration":{}})
- yield t.case('put_bucket_policy',
- {"Bucket": "foo", "Policy": "<bucket-policy>"})
- yield t.case('put_bucket_request_payment',
- {"Bucket": "foo", "RequestPaymentConfiguration":{"Payer": ""}})
- yield t.case('put_bucket_versioning',
- {"Bucket": "foo", "VersioningConfiguration":{}})
- yield t.case('put_bucket_website',
- {"Bucket": "foo",
- "WebsiteConfiguration":{}})
- yield t.case('put_object_acl',
- {"Bucket": "foo", "Key": "bar", "AccessControlPolicy":{}})
- yield t.case('put_object_legal_hold',
- {"Bucket": "foo", "Key": "bar", "LegalHold":{"Status": "ON"}})
- yield t.case('put_object_retention',
- {"Bucket": "foo", "Key": "bar",
- "Retention":{"RetainUntilDate":"2020-11-05"}})
- yield t.case('put_object_lock_configuration',
- {"Bucket": "foo", "ObjectLockConfiguration":{}})
+ t.case('put_bucket_tagging',
+ {"Bucket": "foo", "Tagging": {"TagSet": []}})
+ t.case('put_bucket_lifecycle',
+ {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}})
+ t.case('put_bucket_lifecycle_configuration',
+ {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}})
+ t.case('put_bucket_cors',
+ {"Bucket": "foo", "CORSConfiguration": {"CORSRules": []}})
+ t.case('delete_objects',
+ {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}})
+ t.case('put_bucket_replication',
+ {"Bucket": "foo",
+ "ReplicationConfiguration": {"Role": "", "Rules": []}})
+ t.case('put_bucket_acl',
+ {"Bucket": "foo", "AccessControlPolicy": {}})
+ t.case('put_bucket_logging',
+ {"Bucket": "foo",
+ "BucketLoggingStatus": {}})
+ t.case('put_bucket_notification',
+ {"Bucket": "foo", "NotificationConfiguration": {}})
+ t.case('put_bucket_policy',
+ {"Bucket": "foo", "Policy": "<bucket-policy>"})
+ t.case('put_bucket_request_payment',
+ {"Bucket": "foo", "RequestPaymentConfiguration": {"Payer": ""}})
+ t.case('put_bucket_versioning',
+ {"Bucket": "foo", "VersioningConfiguration": {}})
+ t.case('put_bucket_website',
+ {"Bucket": "foo",
+ "WebsiteConfiguration": {}})
+ t.case('put_object_acl',
+ {"Bucket": "foo", "Key": "bar", "AccessControlPolicy": {}})
+ t.case('put_object_legal_hold',
+ {"Bucket": "foo", "Key": "bar", "LegalHold": {"Status": "ON"}})
+ t.case('put_object_retention',
+ {"Bucket": "foo", "Key": "bar",
+ "Retention": {"RetainUntilDate": "2020-11-05"}})
+ t.case('put_object_lock_configuration',
+ {"Bucket": "foo", "ObjectLockConfiguration": {}})
def _verify_checksum_in_headers(operation, operation_kwargs):
@@ -1362,36 +1362,36 @@ def test_correct_url_used_for_s3():
t = S3AddressingCases(_verify_expected_endpoint_url)
# The default behavior for sigv2. DNS compatible buckets
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-west-1', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.us-west-1.amazonaws.com/key')
- yield t.case(region='us-west-1', bucket='bucket', key='key',
- signature_version='s3', is_secure=False,
- expected_url='http://bucket.s3.us-west-1.amazonaws.com/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-1', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.us-west-1.amazonaws.com/key')
+ t.case(region='us-west-1', bucket='bucket', key='key',
+ signature_version='s3', is_secure=False,
+ expected_url='http://bucket.s3.us-west-1.amazonaws.com/key')
# Virtual host addressing is independent of signature version.
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url=(
- 'https://bucket.s3.us-west-2.amazonaws.com/key'))
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-west-1', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url=(
- 'https://bucket.s3.us-west-1.amazonaws.com/key'))
- yield t.case(region='us-west-1', bucket='bucket', key='key',
- signature_version='s3v4', is_secure=False,
- expected_url=(
- 'http://bucket.s3.us-west-1.amazonaws.com/key'))
- yield t.case(
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url=(
+ 'https://bucket.s3.us-west-2.amazonaws.com/key'))
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-1', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url=(
+ 'https://bucket.s3.us-west-1.amazonaws.com/key'))
+ t.case(region='us-west-1', bucket='bucket', key='key',
+ signature_version='s3v4', is_secure=False,
+ expected_url=(
+ 'http://bucket.s3.us-west-1.amazonaws.com/key'))
+ t.case(
region='us-west-1', bucket='bucket-with-num-1', key='key',
signature_version='s3v4', is_secure=False,
expected_url='http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key')
@@ -1399,189 +1399,188 @@ def test_correct_url_used_for_s3():
# Regions outside of the 'aws' partition.
# These should still default to virtual hosted addressing
# unless explicitly configured otherwise.
- yield t.case(region='cn-north-1', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url=(
- 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
+ t.case(region='cn-north-1', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url=(
+ 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
# This isn't actually supported because cn-north-1 is sigv4 only,
# but we'll still double check that our internal logic is correct
# when building the expected url.
- yield t.case(region='cn-north-1', bucket='bucket', key='key',
- signature_version='s3',
- expected_url=(
- 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
+ t.case(region='cn-north-1', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url=(
+ 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
# If the request is unsigned, we should have the default
# fix_s3_host behavior which is to use virtual hosting where
# possible but fall back to path style when needed.
- yield t.case(region='cn-north-1', bucket='bucket', key='key',
- signature_version=UNSIGNED,
- expected_url=(
- 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
- yield t.case(region='cn-north-1', bucket='bucket.dot', key='key',
- signature_version=UNSIGNED,
- expected_url=(
- 'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key'))
+ t.case(region='cn-north-1', bucket='bucket', key='key',
+ signature_version=UNSIGNED,
+ expected_url=(
+ 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
+ t.case(region='cn-north-1', bucket='bucket.dot', key='key',
+ signature_version=UNSIGNED,
+ expected_url=(
+ 'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key'))
# And of course you can explicitly specify which style to use.
virtual_hosting = {'addressing_style': 'virtual'}
- yield t.case(region='cn-north-1', bucket='bucket', key='key',
- signature_version=UNSIGNED,
- s3_config=virtual_hosting,
- expected_url=(
- 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
+ t.case(region='cn-north-1', bucket='bucket', key='key',
+ signature_version=UNSIGNED,
+ s3_config=virtual_hosting,
+ expected_url=(
+ 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
path_style = {'addressing_style': 'path'}
- yield t.case(region='cn-north-1', bucket='bucket', key='key',
- signature_version=UNSIGNED,
- s3_config=path_style,
- expected_url=(
- 'https://s3.cn-north-1.amazonaws.com.cn/bucket/key'))
+ t.case(region='cn-north-1', bucket='bucket', key='key',
+ signature_version=UNSIGNED,
+ s3_config=path_style,
+ expected_url=(
+ 'https://s3.cn-north-1.amazonaws.com.cn/bucket/key'))
# If you don't have a DNS compatible bucket, we use path style.
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket.dot', key='key',
expected_url='https://s3.us-west-2.amazonaws.com/bucket.dot/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket.dot', key='key',
expected_url='https://s3.amazonaws.com/bucket.dot/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='BucketName', key='key',
expected_url='https://s3.amazonaws.com/BucketName/key')
- yield t.case(
+ t.case(
region='us-west-1', bucket='bucket_name', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/bucket_name/key')
- yield t.case(
+ t.case(
region='us-west-1', bucket='-bucket-name', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/-bucket-name/key')
- yield t.case(
+ t.case(
region='us-west-1', bucket='bucket-name-', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/bucket-name-/key')
- yield t.case(
+ t.case(
region='us-west-1', bucket='aa', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/aa/key')
- yield t.case(
+ t.case(
region='us-west-1', bucket='a'*64, key='key',
expected_url=('https://s3.us-west-1.amazonaws.com/%s/key' % ('a' * 64))
)
# Custom endpoint url should always be used.
- yield t.case(
+ t.case(
customer_provided_endpoint='https://my-custom-s3/',
bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
- yield t.case(
+ t.case(
customer_provided_endpoint='https://my-custom-s3/',
bucket='bucket.dots', key='bar',
expected_url='https://my-custom-s3/bucket.dots/bar')
# Doesn't matter what region you specify, a custom endpoint url always
# wins.
- yield t.case(
+ t.case(
customer_provided_endpoint='https://my-custom-s3/',
region='us-west-2', bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
# Explicitly configuring "virtual" addressing_style.
virtual_hosting = {'addressing_style': 'virtual'}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
- yield t.case(
+ t.case(
region='eu-central-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.eu-central-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
customer_provided_endpoint='https://foo.amazonaws.com',
expected_url='https://bucket.foo.amazonaws.com/key')
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.unknown.amazonaws.com/key')
# Test us-gov with virtual addressing.
- yield t.case(
+ t.case(
region='us-gov-west-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region='fips-us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3-fips.us-gov-west-1.amazonaws.com/key')
-
# Test path style addressing.
path_style = {'addressing_style': 'path'}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
expected_url='https://s3.amazonaws.com/bucket/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
customer_provided_endpoint='https://foo.amazonaws.com/',
expected_url='https://foo.amazonaws.com/bucket/key')
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=path_style,
expected_url='https://s3.unknown.amazonaws.com/bucket/key')
# S3 accelerate
use_accelerate = {'use_accelerate_endpoint': True}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
- yield t.case(
+ t.case(
# region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# Provided endpoints still get recognized as accelerate endpoints.
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='http://s3-accelerate.amazonaws.com',
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate, is_secure=False,
# Note we're using http:// because is_secure=False.
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# s3-accelerate must be the first part of the url.
customer_provided_endpoint='https://foo.s3-accelerate.amazonaws.com',
expected_url='https://foo.s3-accelerate.amazonaws.com/bucket/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# The endpoint must be an Amazon endpoint.
customer_provided_endpoint='https://s3-accelerate.notamazon.com',
expected_url='https://s3-accelerate.notamazon.com/bucket/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config={'use_accelerate_endpoint': True,
'addressing_style': 'path'},
@@ -1589,17 +1588,17 @@ def test_correct_url_used_for_s3():
# S3 dual stack endpoints.
use_dualstack = {'use_dualstack_endpoint': True}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region=None, bucket='bucket', key='key',
s3_config=use_dualstack,
# Uses us-east-1 for no region set.
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region='aws-global', bucket='bucket', key='key',
s3_config=use_dualstack,
# Pseudo-regions should not have any special resolving logic even when
@@ -1608,32 +1607,32 @@ def test_correct_url_used_for_s3():
# region name.
expected_url=(
'https://bucket.s3.dualstack.aws-global.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.unknown.amazonaws.com/key')
# Non DNS compatible buckets use path style for dual stack.
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket.dot', key='key',
s3_config=use_dualstack,
# Still default to virtual hosted when possible.
expected_url=(
'https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key'))
# Supports is_secure (use_ssl=False in create_client()).
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket.dot', key='key', is_secure=False,
s3_config=use_dualstack,
# Still default to virtual hosted when possible.
@@ -1646,7 +1645,7 @@ def test_correct_url_used_for_s3():
'use_dualstack_endpoint': True,
'addressing_style': 'path',
}
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=force_path_style,
# Still default to virtual hosted when possible.
@@ -1657,32 +1656,32 @@ def test_correct_url_used_for_s3():
'use_accelerate_endpoint': True,
'use_dualstack_endpoint': True,
}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
- yield t.case(
+ t.case(
# Region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
# Only s3-accelerate overrides a customer endpoint.
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack,
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url=(
'https://bucket.s3-accelerate.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# Dualstack is whitelisted.
customer_provided_endpoint=(
'https://s3-accelerate.dualstack.amazonaws.com'),
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# Even whitelisted parts cannot be duplicated.
customer_provided_endpoint=(
@@ -1690,7 +1689,7 @@ def test_correct_url_used_for_s3():
expected_url=(
'https://s3-accelerate.dualstack.dualstack'
'.amazonaws.com/bucket/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# More than two extra parts is not allowed.
customer_provided_endpoint=(
@@ -1699,12 +1698,12 @@ def test_correct_url_used_for_s3():
expected_url=(
'https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com'
'/bucket/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack, is_secure=False,
# Note we're using http:// because is_secure=False.
@@ -1713,7 +1712,7 @@ def test_correct_url_used_for_s3():
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
use_accelerate_dualstack['addressing_style'] = 'path'
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
@@ -1723,14 +1722,14 @@ def test_correct_url_used_for_s3():
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint'
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': True},
expected_url=(
@@ -1738,21 +1737,21 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='myendpoint/key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/myendpoint/key'
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='foo/myendpoint/key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/foo/myendpoint/key'
)
)
- yield t.case(
+ t.case(
# Note: The access-point arn has us-west-2 and the client's region is
# us-east-1, for the default case the access-point arn region is used.
region='us-east-1', bucket=accesspoint_arn, key='key',
@@ -1761,7 +1760,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1769,14 +1768,14 @@ def test_correct_url_used_for_s3():
'us-east-1.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='s3-external-1', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='s3-external-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1784,14 +1783,14 @@ def test_correct_url_used_for_s3():
's3-external-1.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='aws-global', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='aws-global', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1799,7 +1798,7 @@ def test_correct_url_used_for_s3():
'aws-global.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='unknown', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1807,7 +1806,7 @@ def test_correct_url_used_for_s3():
'unknown.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='unknown', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': True},
expected_url=(
@@ -1818,21 +1817,21 @@ def test_correct_url_used_for_s3():
accesspoint_arn_cn = (
'arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint'
)
- yield t.case(
+ t.case(
region='cn-north-1', bucket=accesspoint_arn_cn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'cn-north-1.amazonaws.com.cn/key'
)
)
- yield t.case(
+ t.case(
region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'cn-north-1.amazonaws.com.cn/key'
)
)
- yield t.case(
+ t.case(
region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1843,21 +1842,21 @@ def test_correct_url_used_for_s3():
accesspoint_arn_gov = (
'arn:aws-us-gov:s3:us-gov-east-1:123456789012:accesspoint:myendpoint'
)
- yield t.case(
+ t.case(
region='us-gov-east-1', bucket=accesspoint_arn_gov, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-gov-east-1.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-gov-east-1.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1866,7 +1865,7 @@ def test_correct_url_used_for_s3():
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key', is_secure=False,
expected_url=(
'http://myendpoint-123456789012.s3-accesspoint.'
@@ -1874,7 +1873,7 @@ def test_correct_url_used_for_s3():
)
)
# Dual-stack with access-point arn
- yield t.case(
+ t.case(
# Note: The access-point arn has us-west-2 and the client's region is
# us-east-1, for the default case the access-point arn region is used.
region='us-east-1', bucket=accesspoint_arn, key='key',
@@ -1886,7 +1885,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={
'use_dualstack_endpoint': True,
@@ -1897,7 +1896,7 @@ def test_correct_url_used_for_s3():
'us-east-1.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-gov-east-1', bucket=accesspoint_arn_gov, key='key',
s3_config={
'use_dualstack_endpoint': True,
@@ -1910,7 +1909,7 @@ def test_correct_url_used_for_s3():
# None of the various s3 settings related to paths should affect what
# endpoint to use when an access-point is provided.
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'auto'},
expected_url=(
@@ -1918,7 +1917,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'virtual'},
expected_url=(
@@ -1926,7 +1925,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'path'},
expected_url=(
@@ -1939,27 +1938,27 @@ def test_correct_url_used_for_s3():
us_east_1_regional_endpoint = {
'us_east_1_regional_endpoint': 'regional'
}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.us-east-1.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.us-west-2.amazonaws.com/key'))
- yield t.case(
+ t.case(
region=None, bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.unknown.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1967,7 +1966,7 @@ def test_correct_url_used_for_s3():
},
expected_url=(
'https://bucket.s3.dualstack.us-east-1.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1975,7 +1974,7 @@ def test_correct_url_used_for_s3():
},
expected_url=(
'https://bucket.s3-accelerate.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1989,19 +1988,19 @@ def test_correct_url_used_for_s3():
us_east_1_regional_endpoint_legacy = {
'us_east_1_regional_endpoint': 'legacy'
}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- yield t.case(
+ t.case(
region=None, bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='unknown', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
@@ -2053,7 +2052,7 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config,
with ClientHTTPStubber(s3) as http_stubber:
http_stubber.add_response()
s3.put_object(Bucket=bucket, Key=key, Body=b'bar')
- assert_equal(http_stubber.requests[0].url, expected_url)
+ assert http_stubber.requests[0].url == expected_url
def _create_s3_client(region, is_secure, endpoint_url, s3_config,
@@ -2086,96 +2085,96 @@ def test_addressing_for_presigned_urls():
# us-east-1, or the "global" endpoint. A signature version of
# None means the user doesn't have signature version configured.
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version=None,
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-1', bucket='bucket', key='key',
- signature_version='s3v4',
- s3_config={'addressing_style': 'path'},
- expected_url='https://s3.amazonaws.com/bucket/key')
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version=None,
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-1', bucket='bucket', key='key',
+ signature_version='s3v4',
+ s3_config={'addressing_style': 'path'},
+ expected_url='https://s3.amazonaws.com/bucket/key')
# A region that supports both 's3' and 's3v4'.
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version=None,
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version='s3v4',
- s3_config={'addressing_style': 'path'},
- expected_url='https://s3.us-west-2.amazonaws.com/bucket/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version=None,
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version='s3v4',
+ s3_config={'addressing_style': 'path'},
+ expected_url='https://s3.us-west-2.amazonaws.com/bucket/key')
# An 's3v4' only region.
- yield t.case(region='us-east-2', bucket='bucket', key='key',
- signature_version=None,
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-2', bucket='bucket', key='key',
- signature_version='s3',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-2', bucket='bucket', key='key',
- signature_version='s3v4',
- expected_url='https://bucket.s3.amazonaws.com/key')
- yield t.case(region='us-east-2', bucket='bucket', key='key',
- signature_version='s3v4',
- s3_config={'addressing_style': 'path'},
- expected_url='https://s3.us-east-2.amazonaws.com/bucket/key')
+ t.case(region='us-east-2', bucket='bucket', key='key',
+ signature_version=None,
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-2', bucket='bucket', key='key',
+ signature_version='s3',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-2', bucket='bucket', key='key',
+ signature_version='s3v4',
+ expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-east-2', bucket='bucket', key='key',
+ signature_version='s3v4',
+ s3_config={'addressing_style': 'path'},
+ expected_url='https://s3.us-east-2.amazonaws.com/bucket/key')
# Dualstack endpoints
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
signature_version=None,
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
signature_version='s3',
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- yield t.case(
+ t.case(
region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
# Accelerate
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version=None,
- s3_config={'use_accelerate_endpoint': True},
- expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version=None,
+ s3_config={'use_accelerate_endpoint': True},
+ expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# A region that we don't know about.
- yield t.case(region='us-west-50', bucket='bucket', key='key',
- signature_version=None,
- expected_url='https://bucket.s3.amazonaws.com/key')
+ t.case(region='us-west-50', bucket='bucket', key='key',
+ signature_version=None,
+ expected_url='https://bucket.s3.amazonaws.com/key')
# Customer provided URL results in us leaving the host untouched.
- yield t.case(region='us-west-2', bucket='bucket', key='key',
- signature_version=None,
- customer_provided_endpoint='https://foo.com/',
- expected_url='https://foo.com/bucket/key')
+ t.case(region='us-west-2', bucket='bucket', key='key',
+ signature_version=None,
+ customer_provided_endpoint='https://foo.com/',
+ expected_url='https://foo.com/bucket/key')
# Access-point
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint'
)
- yield t.case(
+ t.case(
region='us-west-2', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- yield t.case(
+ t.case(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -2188,12 +2187,12 @@ def test_addressing_for_presigned_urls():
us_east_1_regional_endpoint = {
'us_east_1_regional_endpoint': 'regional'
}
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint, signature_version='s3',
expected_url=(
'https://bucket.s3.us-east-1.amazonaws.com/key'))
- yield t.case(
+ t.case(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint, signature_version='s3v4',
expected_url=(
@@ -2215,4 +2214,4 @@ def _verify_presigned_url_addressing(region, bucket, key, s3_config,
# those are tested elsewhere. We just care about the hostname/path.
parts = urlsplit(url)
actual = '%s://%s%s' % parts[:3]
- assert_equal(actual, expected_url)
+ assert actual == expected_url
diff --git a/tests/functional/test_service_alias.py b/tests/functional/test_service_alias.py
index d82cfbcf..cd58e6ac 100644
--- a/tests/functional/test_service_alias.py
+++ b/tests/functional/test_service_alias.py
@@ -17,7 +17,7 @@ from botocore.handlers import SERVICE_NAME_ALIASES
def test_can_use_service_alias():
session = botocore.session.get_session()
for (alias, name) in SERVICE_NAME_ALIASES.items():
- yield _instantiates_the_same_client, session, name, alias
+ _instantiates_the_same_client(session, name, alias)
def _instantiates_the_same_client(session, service_name, service_alias):
diff --git a/tests/functional/test_service_names.py b/tests/functional/test_service_names.py
index dd831d32..09ba40d3 100644
--- a/tests/functional/test_service_names.py
+++ b/tests/functional/test_service_names.py
@@ -12,7 +12,6 @@
# language governing permissions and limitations under the License.
import re
-from nose.tools import assert_true
from botocore.session import get_session
BLACKLIST = [
@@ -41,18 +40,18 @@ MAX_SERVICE_NAME_LENGTH = 50
def _assert_name_length(service_name):
if service_name not in BLACKLIST:
service_name_length = len(service_name)
- assert_true(service_name_length >= MIN_SERVICE_NAME_LENGTH,
- 'Service name must be greater than or equal to 2 '
- 'characters in length.')
- assert_true(service_name_length <= MAX_SERVICE_NAME_LENGTH,
- 'Service name must be less than or equal to 50 '
- 'characters in length.')
+ assert service_name_length >= MIN_SERVICE_NAME_LENGTH, \
+ ('Service name must be greater than or equal to {:d} ' +
+ 'characters in length.').format(MIN_SERVICE_NAME_LENGTH)
+ assert service_name_length <= MAX_SERVICE_NAME_LENGTH, \
+ ('Service name must be less than or equal to {:d} ' +
+ 'characters in length.').format(MAX_SERVICE_NAME_LENGTH)
def _assert_name_pattern(service_name):
if service_name not in BLACKLIST:
- valid = VALID_NAME_REGEX.match(service_name) is not None
- assert_true(valid, VALID_NAME_EXPLANATION)
+ assert VALID_NAME_REGEX.match(service_name) is not None, \
+ VALID_NAME_EXPLANATION
def test_service_names_are_valid():
@@ -60,5 +59,5 @@ def test_service_names_are_valid():
loader = session.get_component('data_loader')
service_names = loader.list_available_services('service-2')
for service_name in service_names:
- yield _assert_name_length, service_name
- yield _assert_name_pattern, service_name
+ _assert_name_length(service_name)
+ _assert_name_pattern(service_name)
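A pytest-native alternative to the plain loop above would parametrize over the discovered service names. This is only a sketch, reusing the helpers from test_service_names.py and assuming pytest is available (the patch itself keeps the loop):

import pytest

from botocore.session import get_session

# Collected at import time so pytest can generate one test per service name.
_loader = get_session().get_component('data_loader')
_all_service_names = _loader.list_available_services('service-2')


@pytest.mark.parametrize('service_name', _all_service_names)
def test_service_name_is_valid(service_name):
    # Same checks as the loop above, one generated test per name.
    _assert_name_length(service_name)
    _assert_name_pattern(service_name)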
diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py
index d3243a12..bab6a3f1 100644
--- a/tests/functional/test_six_imports.py
+++ b/tests/functional/test_six_imports.py
@@ -15,7 +15,7 @@ def test_no_bare_six_imports():
if not filename.endswith('.py'):
continue
fullname = os.path.join(rootdir, filename)
- yield _assert_no_bare_six_imports, fullname
+ _assert_no_bare_six_imports(fullname)
def _assert_no_bare_six_imports(filename):
diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py
index 752ba224..4a30dcaa 100644
--- a/tests/functional/test_stub.py
+++ b/tests/functional/test_stub.py
@@ -16,6 +16,7 @@ from tests import unittest
import botocore
import botocore.session
import botocore.stub as stub
+from botocore.compat import six
from botocore.stub import Stubber
from botocore.exceptions import StubResponseError, ClientError, \
StubAssertionError, UnStubbedResponseError
@@ -54,8 +55,8 @@ class TestStubber(unittest.TestCase):
def test_activated_stubber_errors_with_no_registered_stubs(self):
self.stubber.activate()
# Params one per line for readability.
- with self.assertRaisesRegexp(UnStubbedResponseError,
- "Unexpected API Call"):
+ with six.assertRaisesRegex(self, UnStubbedResponseError,
+ "Unexpected API Call"):
self.client.list_objects(
Bucket='asdfasdfasdfasdf',
Delimiter='asdfasdfasdfasdf',
@@ -119,8 +120,8 @@ class TestStubber(unittest.TestCase):
'list_objects', service_response, expected_params)
self.stubber.activate()
# This call should raise an error for mismatching expected params.
- with self.assertRaisesRegexp(StubResponseError,
- "{'Bucket': 'bar'},\n"):
+ with six.assertRaisesRegex(self, StubResponseError,
+ "{'Bucket': 'bar'},\n"):
self.client.list_objects(Bucket='foo')
def test_expected_params_mixed_with_errors_responses(self):
@@ -143,7 +144,8 @@ class TestStubber(unittest.TestCase):
self.client.list_objects(Bucket='foo')
# The second call should throw an error for unexpected parameters
- with self.assertRaisesRegexp(StubResponseError, 'Expected parameters'):
+ with six.assertRaisesRegex(self, StubResponseError,
+ 'Expected parameters'):
self.client.list_objects(Bucket='foo')
def test_can_continue_to_call_after_expected_params_fail(self):
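The six.assertRaisesRegex(self, ...) form introduced here, and reused throughout the rest of the patch, is six's compatibility wrapper: it dispatches to TestCase.assertRaisesRegex on Python 3 and to the deprecated assertRaisesRegexp on Python 2. A minimal usage sketch with a hypothetical test case:

import unittest

from botocore.compat import six


class ExampleRegexAssertion(unittest.TestCase):
    # Hypothetical example; the real tests in this patch use the same call shape.
    def test_error_message_matches(self):
        with six.assertRaisesRegex(self, ValueError, 'invalid literal'):
            int('not-a-number')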
diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py
index 4dfa2f7e..87646926 100644
--- a/tests/functional/test_waiter_config.py
+++ b/tests/functional/test_waiter_config.py
@@ -98,9 +98,9 @@ def test_lint_waiter_configs():
except UnknownServiceError:
# The service doesn't have waiters
continue
- yield _validate_schema, validator, waiter_model
+ _validate_schema(validator, waiter_model)
for waiter_name in client.waiter_names:
- yield _lint_single_waiter, client, waiter_name, service_model
+ _lint_single_waiter(client, waiter_name, service_model)
def _lint_single_waiter(client, waiter_name, service_model):
diff --git a/tests/integration/test_client.py b/tests/integration/test_client.py
index b06324bb..cdaa4286 100644
--- a/tests/integration/test_client.py
+++ b/tests/integration/test_client.py
@@ -79,8 +79,8 @@ class TestClientErrors(unittest.TestCase):
def test_region_mentioned_in_invalid_region(self):
client = self.session.create_client(
'cloudformation', region_name='us-east-999')
- with self.assertRaisesRegexp(EndpointConnectionError,
- 'Could not connect to the endpoint URL'):
+ with six.assertRaisesRegex(self, EndpointConnectionError,
+ 'Could not connect to the endpoint URL'):
client.list_stacks()
def test_client_modeled_exception(self):
diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py
index cc8dcf09..b6fe5826 100644
--- a/tests/integration/test_ec2.py
+++ b/tests/integration/test_ec2.py
@@ -13,8 +13,6 @@
from tests import unittest
import itertools
-from nose.plugins.attrib import attr
-
import botocore.session
from botocore.exceptions import ClientError
diff --git a/tests/integration/test_emr.py b/tests/integration/test_emr.py
index a06b4e35..d3b898b1 100644
--- a/tests/integration/test_emr.py
+++ b/tests/integration/test_emr.py
@@ -12,8 +12,6 @@
# language governing permissions and limitations under the License.
from tests import unittest
-from nose.tools import assert_true
-
import botocore.session
from botocore.paginate import PageIterator
from botocore.exceptions import OperationNotPageableError
@@ -34,7 +32,7 @@ def test_emr_endpoints_work_with_py26():
def _test_can_list_clusters_in_region(session, region):
client = session.create_client('emr', region_name=region)
response = client.list_clusters()
- assert_true('Clusters' in response)
+ assert 'Clusters' in response
# I consider these integration tests because they're
diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py
index e1d275a9..fea3e061 100644
--- a/tests/integration/test_s3.py
+++ b/tests/integration/test_s3.py
@@ -22,11 +22,10 @@ import tempfile
import shutil
import threading
import logging
-import mock
from tarfile import TarFile
from contextlib import closing
-from nose.plugins.attrib import attr
+import pytest
import urllib3
from botocore.endpoint import Endpoint
@@ -324,7 +323,7 @@ class TestS3Objects(TestS3BaseWithBucket):
Bucket=self.bucket_name, Key=key_name)
self.assert_status_code(response, 204)
- @attr('slow')
+ @pytest.mark.slow
def test_can_paginate(self):
for i in range(5):
key_name = 'key%s' % i
@@ -340,7 +339,7 @@ class TestS3Objects(TestS3BaseWithBucket):
for el in responses]
self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
- @attr('slow')
+ @pytest.mark.slow
def test_can_paginate_with_page_size(self):
for i in range(5):
key_name = 'key%s' % i
@@ -357,7 +356,7 @@ class TestS3Objects(TestS3BaseWithBucket):
for el in data]
self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
- @attr('slow')
+ @pytest.mark.slow
def test_result_key_iters(self):
for i in range(5):
key_name = 'key/%s/%s' % (i, i)
@@ -380,7 +379,7 @@ class TestS3Objects(TestS3BaseWithBucket):
self.assertIn('Contents', response)
self.assertIn('CommonPrefixes', response)
- @attr('slow')
+ @pytest.mark.slow
def test_can_get_and_put_object(self):
self.create_object('foobarbaz', body='body contents')
time.sleep(3)
@@ -930,7 +929,7 @@ class TestS3SigV4Client(BaseS3ClientTest):
Key='foo.txt', Body=body)
self.assert_status_code(response, 200)
- @attr('slow')
+ @pytest.mark.slow
def test_paginate_list_objects_unicode(self):
key_names = [
u'non-ascii-key-\xe4\xf6\xfc-01.txt',
@@ -953,7 +952,7 @@ class TestS3SigV4Client(BaseS3ClientTest):
self.assertEqual(key_names, key_refs)
- @attr('slow')
+ @pytest.mark.slow
def test_paginate_list_objects_safe_chars(self):
key_names = [
u'-._~safe-chars-key-01.txt',
@@ -1247,7 +1246,7 @@ class TestRegionRedirect(BaseS3ClientTest):
eu_bucket = self.create_bucket(self.bucket_region)
msg = 'The authorization mechanism you have provided is not supported.'
- with self.assertRaisesRegexp(ClientError, msg):
+ with six.assertRaisesRegex(self, ClientError, msg):
sigv2_client.list_objects(Bucket=eu_bucket)
def test_region_redirects_multiple_requests(self):
diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py
index d8a164a1..d6c6aeee 100644
--- a/tests/integration/test_smoke.py
+++ b/tests/integration/test_smoke.py
@@ -11,17 +11,14 @@ to use and all the services in SMOKE_TESTS/ERROR_TESTS will be tested.
"""
import os
-import mock
from pprint import pformat
import warnings
import logging
-from nose.tools import assert_equal, assert_true
from tests import ClientHTTPStubber
from botocore import xform_name
import botocore.session
from botocore.client import ClientError
-from botocore.endpoint import Endpoint
from botocore.exceptions import ConnectionClosedError
@@ -262,10 +259,9 @@ def _make_client_call(client, operation_name, kwargs):
method = getattr(client, operation_name)
with warnings.catch_warnings(record=True) as caught_warnings:
response = method(**kwargs)
- assert_equal(len(caught_warnings), 0,
- "Warnings were emitted during smoke test: %s"
- % caught_warnings)
- assert_true('Errors' not in response)
+ assert len(caught_warnings) == 0, \
+ "Warnings were emitted during smoke test: %s" % caught_warnings
+ assert 'Errors' not in response
def test_can_make_request_and_understand_errors_with_client():
@@ -275,7 +271,7 @@ def test_can_make_request_and_understand_errors_with_client():
for operation_name in ERROR_TESTS[service_name]:
kwargs = ERROR_TESTS[service_name][operation_name]
method_name = xform_name(operation_name)
- yield _make_error_client_call, client, method_name, kwargs
+ _make_error_client_call(client, method_name, kwargs)
def _make_error_client_call(client, operation_name, kwargs):
diff --git a/tests/integration/test_sts.py b/tests/integration/test_sts.py
index 91f6898f..33e3d0a2 100644
--- a/tests/integration/test_sts.py
+++ b/tests/integration/test_sts.py
@@ -13,6 +13,8 @@
from tests import unittest
import botocore.session
+
+from botocore.compat import six
from botocore.exceptions import ClientError
class TestSTS(unittest.TestCase):
@@ -38,5 +40,5 @@ class TestSTS(unittest.TestCase):
self.assertEqual(sts.meta.endpoint_url,
'https://sts.us-west-2.amazonaws.com')
# Signing error will be thrown with the incorrect region name included.
- with self.assertRaisesRegexp(ClientError, 'ap-southeast-1') as e:
+ with six.assertRaisesRegex(self, ClientError, 'ap-southeast-1'):
sts.get_session_token()
diff --git a/tests/integration/test_waiters.py b/tests/integration/test_waiters.py
index b27b3b0b..99cef057 100644
--- a/tests/integration/test_waiters.py
+++ b/tests/integration/test_waiters.py
@@ -12,14 +12,14 @@
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
-from nose.plugins.attrib import attr
+import pytest
import botocore.session
from botocore.exceptions import WaiterError
# This is the same test as above, except using the client interface.
-@attr('slow')
+@pytest.mark.slow
class TestWaiterForDynamoDB(unittest.TestCase):
def setUp(self):
self.session = botocore.session.get_session()
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index 2d28a000..fa711192 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -18,8 +18,7 @@ AWS provides a test suite for signature version 4:
http://docs.aws.amazon.com/general/latest/gr/signature-v4-test-suite.html
This module contains logic to run these tests. The test files were
-placed in ./aws4_testsuite, and we're using nose's test generators to
-dynamically generate testcases based on these files.
+placed in ./aws4_testsuite.
"""
import os
@@ -106,7 +105,7 @@ def test_generator():
if test_case in TESTS_TO_IGNORE:
log.debug("Skipping test: %s", test_case)
continue
- yield (_test_signature_version_4, test_case)
+ _test_signature_version_4(test_case)
datetime_patcher.stop()
formatdate_patcher.stop()
@@ -147,21 +146,22 @@ def _test_signature_version_4(test_case):
auth = botocore.auth.SigV4Auth(test_case.credentials, 'host', 'us-east-1')
actual_canonical_request = auth.canonical_request(request)
- assert_equal(actual_canonical_request, test_case.canonical_request,
- test_case.raw_request, 'canonical_request')
+ assert_requests_equal(actual_canonical_request,
+ test_case.canonical_request,
+ test_case.raw_request, 'canonical_request')
actual_string_to_sign = auth.string_to_sign(request,
actual_canonical_request)
- assert_equal(actual_string_to_sign, test_case.string_to_sign,
- test_case.raw_request, 'string_to_sign')
+ assert_requests_equal(actual_string_to_sign, test_case.string_to_sign,
+ test_case.raw_request, 'string_to_sign')
auth.add_auth(request)
actual_auth_header = request.headers['Authorization']
- assert_equal(actual_auth_header, test_case.authorization_header,
- test_case.raw_request, 'authheader')
+ assert_requests_equal(actual_auth_header, test_case.authorization_header,
+ test_case.raw_request, 'authheader')
-def assert_equal(actual, expected, raw_request, part):
+def assert_requests_equal(actual, expected, raw_request, part):
if actual != expected:
message = "The %s did not match" % part
message += "\nACTUAL:%r !=\nEXPECT:%r" % (actual, expected)
diff --git a/tests/unit/docs/test_utils.py b/tests/unit/docs/test_utils.py
index c526d24f..b3dae2b7 100644
--- a/tests/unit/docs/test_utils.py
+++ b/tests/unit/docs/test_utils.py
@@ -223,5 +223,5 @@ class TestAppendParamDocumentation(BaseDocsTest):
class TestEscapeControls(unittest.TestCase):
def test_escapes_controls(self):
escaped = escape_controls('\na\rb\tc\fd\be')
- self.assertEquals(escaped, '\\na\\rb\\tc\\fd\\be')
+ self.assertEqual(escaped, '\\na\\rb\\tc\\fd\\be')
diff --git a/tests/unit/response_parsing/README.rst b/tests/unit/response_parsing/README.rst
index 9e00c1d1..67ccfe53 100644
--- a/tests/unit/response_parsing/README.rst
+++ b/tests/unit/response_parsing/README.rst
@@ -16,12 +16,12 @@ response sent from the server for that particular request and the JSON
file contains the expected Python data structure created from the XML
response.
-The main test is contained in ``test_response_parser.py`` and is
-implemented as a nose generator. Each time through the loop an XML
-file is read and passed to a ``botocore.response.XmlResponse``
-object. The corresponding JSON file is then parsed and compared to
-the value created by the parser. If the are equal, the test passes. If
-they are not equal, both the expected result and the actual result are
+The main test is contained in ``test_response_parser.py``. Each
+time through the loop an XML file is read and passed to
+a ``botocore.response.XmlResponse`` object. The corresponding
+JSON file is then parsed and compared to the value created by the
+parser. If they are equal, the test passes. If they are not
+equal, both the expected result and the actual result are
pretty-printed to stdout and the tests continue.
-----------------
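A condensed sketch of the loop described above, reusing the helper names that appear in test_response_parsing.py below (_get_expected_parsed_result, _get_operation_model, _get_raw_response_body, _test_parsed_response); the xmlfiles and service_model setup is assumed from the rest of that module:

def run_xml_parsing_cases(service_model, xmlfiles):
    # For each recorded XML response, parse it and compare the result
    # against the expected Python structure stored in the matching JSON file.
    for xmlfile in xmlfiles:
        expected = _get_expected_parsed_result(xmlfile)
        operation_model = _get_operation_model(service_model, xmlfile)
        raw_response_body = _get_raw_response_body(xmlfile)
        _test_parsed_response(xmlfile, raw_response_body,
                              operation_model, expected)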
diff --git a/tests/unit/response_parsing/test_response_parsing.py b/tests/unit/response_parsing/test_response_parsing.py
index b182b214..421326b6 100644
--- a/tests/unit/response_parsing/test_response_parsing.py
+++ b/tests/unit/response_parsing/test_response_parsing.py
@@ -119,8 +119,8 @@ def test_xml_parsing():
expected = _get_expected_parsed_result(xmlfile)
operation_model = _get_operation_model(service_model, xmlfile)
raw_response_body = _get_raw_response_body(xmlfile)
- yield _test_parsed_response, xmlfile, raw_response_body, \
- operation_model, expected
+ _test_parsed_response(xmlfile, raw_response_body,
+ operation_model, expected)
def _get_raw_response_body(xmlfile):
@@ -179,8 +179,8 @@ def test_json_errors_parsing():
operation_model = service_model.operation_model(op_name)
with open(raw_response_file, 'rb') as f:
raw_response_body = f.read()
- yield _test_parsed_response, raw_response_file, \
- raw_response_body, operation_model, expected
+ _test_parsed_response(raw_response_file,
+ raw_response_body, operation_model, expected)
def _uhg_test_json_parsing():
diff --git a/tests/unit/retries/test_special.py b/tests/unit/retries/test_special.py
index 1ebbfc06..2929c923 100644
--- a/tests/unit/retries/test_special.py
+++ b/tests/unit/retries/test_special.py
@@ -1,9 +1,7 @@
from tests import unittest
import mock
-from nose.tools import assert_equal, assert_is_instance
-from botocore.compat import six
from botocore.awsrequest import AWSResponse
from botocore.retries import standard, special
diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py
index a88c7995..063823ef 100644
--- a/tests/unit/retries/test_standard.py
+++ b/tests/unit/retries/test_standard.py
@@ -1,7 +1,6 @@
from tests import unittest
import mock
-from nose.tools import assert_equal, assert_is_instance
from botocore.retries import standard
from botocore.retries import quota
@@ -154,22 +153,20 @@ SERVICE_DESCRIPTION_WITH_RETRIES = {
def test_can_detect_retryable_transient_errors():
transient_checker = standard.TransientRetryableChecker()
for case in RETRYABLE_TRANSIENT_ERRORS:
- yield (_verify_retryable, transient_checker, None) + case
+ _verify_retryable(transient_checker, None, *case)
def test_can_detect_retryable_throttled_errors():
throttled_checker = standard.ThrottledRetryableChecker()
for case in RETRYABLE_THROTTLED_RESPONSES:
- yield (_verify_retryable, throttled_checker, None) + case
+ _verify_retryable(throttled_checker, None, *case)
def test_can_detect_modeled_retryable_errors():
modeled_retry_checker = standard.ModeledRetryableChecker()
- test_params = (_verify_retryable, modeled_retry_checker,
- get_operation_model_with_retries())
for case in RETRYABLE_MODELED_ERRORS:
- test_case = test_params + case
- yield test_case
+ _verify_retryable(modeled_retry_checker,
+ get_operation_model_with_retries(), *case)
def test_standard_retry_conditions():
@@ -184,9 +181,8 @@ def test_standard_retry_conditions():
# are retryable for a different checker. We need to filter out all
# the False cases.
all_cases = [c for c in all_cases if c[2]]
- test_params = (_verify_retryable, standard_checker, op_model)
for case in all_cases:
- yield test_params + case
+ _verify_retryable(standard_checker, op_model, *case)
def get_operation_model_with_retries():
@@ -213,7 +209,7 @@ def _verify_retryable(checker, operation_model,
http_response=http_response,
caught_exception=caught_exception,
)
- assert_equal(checker.is_retryable(context), is_retryable)
+ assert checker.is_retryable(context) == is_retryable
def arbitrary_retry_context():
@@ -233,36 +229,36 @@ def test_can_honor_max_attempts():
checker = standard.MaxAttemptsChecker(max_attempts=3)
context = arbitrary_retry_context()
context.attempt_number = 1
- assert_equal(checker.is_retryable(context), True)
+ assert checker.is_retryable(context)
context.attempt_number = 2
- assert_equal(checker.is_retryable(context), True)
+ assert checker.is_retryable(context)
context.attempt_number = 3
- assert_equal(checker.is_retryable(context), False)
+ assert not checker.is_retryable(context)
def test_max_attempts_adds_metadata_key_when_reached():
checker = standard.MaxAttemptsChecker(max_attempts=3)
context = arbitrary_retry_context()
context.attempt_number = 3
- assert_equal(checker.is_retryable(context), False)
- assert_equal(context.get_retry_metadata(), {'MaxAttemptsReached': True})
+ assert not checker.is_retryable(context)
+ assert context.get_retry_metadata() == {'MaxAttemptsReached': True}
def test_can_create_default_retry_handler():
mock_client = mock.Mock()
mock_client.meta.service_model.service_id = model.ServiceId('my-service')
- assert_is_instance(standard.register_retry_handler(mock_client),
- standard.RetryHandler)
+ assert isinstance(standard.register_retry_handler(mock_client),
+ standard.RetryHandler)
call_args_list = mock_client.meta.events.register.call_args_list
# We should have registered the retry quota to after-calls
first_call = call_args_list[0][0]
second_call = call_args_list[1][0]
# Not sure if there's a way to verify the class associated with the
# bound method matches what we expect.
- assert_equal(first_call[0], 'after-call.my-service')
- assert_equal(second_call[0], 'needs-retry.my-service')
+ assert first_call[0] == 'after-call.my-service'
+ assert second_call[0] == 'needs-retry.my-service'
class TestRetryHandler(unittest.TestCase):
diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py
index 17c041f3..e884af7c 100644
--- a/tests/unit/test_awsrequest.py
+++ b/tests/unit/test_awsrequest.py
@@ -20,7 +20,10 @@ import io
import socket
import sys
-from mock import Mock, patch
+try:
+ from mock import Mock, patch
+except ImportError:
+ from unittest.mock import Mock, patch
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from botocore.exceptions import UnseekableStreamError
@@ -271,11 +274,11 @@ class TestAWSResponse(unittest.TestCase):
def test_text_property(self):
self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0'])
self.response.headers['content-type'] = 'text/plain; charset=utf-8'
- self.assertEquals(self.response.text, u'\u30b8\u30e7\u30b0')
+ self.assertEqual(self.response.text, u'\u30b8\u30e7\u30b0')
def test_text_property_defaults_utf8(self):
self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0'])
- self.assertEquals(self.response.text, u'\u30b8\u30e7\u30b0')
+ self.assertEqual(self.response.text, u'\u30b8\u30e7\u30b0')
class TestAWSHTTPConnection(unittest.TestCase):
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index a50b904d..2f6384f9 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -554,8 +554,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- with self.assertRaisesRegexp(
- TypeError, 'only accepts keyword arguments'):
+ with six.assertRaisesRegex(self, TypeError,
+ 'only accepts keyword arguments'):
service_client.test_operation('foo')
@mock.patch('botocore.args.RequestSigner.sign')
@@ -1550,15 +1550,15 @@ class TestConfig(unittest.TestCase):
self.assertEqual(config.read_timeout, 50)
def test_invalid_kwargs(self):
- with self.assertRaisesRegexp(TypeError, 'Got unexpected keyword'):
+ with six.assertRaisesRegex(self, TypeError, 'Got unexpected keyword'):
botocore.config.Config(foo='foo')
def test_pass_invalid_length_of_args(self):
- with self.assertRaisesRegexp(TypeError, 'Takes at most'):
+ with six.assertRaisesRegex(self, TypeError, 'Takes at most'):
botocore.config.Config('foo', *botocore.config.Config.OPTION_DEFAULTS.values())
def test_create_with_multiple_kwargs(self):
- with self.assertRaisesRegexp(TypeError, 'Got multiple values'):
+ with six.assertRaisesRegex(self, TypeError, 'Got multiple values'):
botocore.config.Config('us-east-1', region_name='us-east-1')
def test_merge_returns_new_config_object(self):
@@ -1610,10 +1610,10 @@ class TestConfig(unittest.TestCase):
self.assertEqual(config.retries['max_attempts'], 15)
def test_validates_retry_config(self):
- with self.assertRaisesRegexp(
- InvalidRetryConfigurationError,
- 'Cannot provide retry configuration for "not-allowed"'):
- botocore.config.Config(retries={'not-allowed': True})
+ with six.assertRaisesRegex(
+ self, InvalidRetryConfigurationError,
+ 'Cannot provide retry configuration for "not-allowed"'):
+ botocore.config.Config(retries={'not-allowed': True})
def test_validates_max_retry_attempts(self):
with self.assertRaises(InvalidMaxRetryAttemptsError):
diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py
index 3cdd1dce..40b7d9e2 100644
--- a/tests/unit/test_compat.py
+++ b/tests/unit/test_compat.py
@@ -13,8 +13,6 @@
import datetime
import mock
-from nose.tools import assert_equal, assert_raises
-
from botocore.exceptions import MD5UnavailableError
from botocore.compat import (
total_seconds, unquote_str, six, ensure_bytes, get_md5,
@@ -98,80 +96,76 @@ class TestGetMD5(unittest.TestCase):
get_md5()
-def test_compat_shell_split_windows():
- windows_cases = {
- r'': [],
- r'spam \\': [r'spam', '\\\\'],
- r'spam ': [r'spam'],
- r' spam': [r'spam'],
- 'spam eggs': [r'spam', r'eggs'],
- 'spam\teggs': [r'spam', r'eggs'],
- 'spam\neggs': ['spam\neggs'],
- '""': [''],
- '" "': [' '],
- '"\t"': ['\t'],
- '\\\\': ['\\\\'],
- '\\\\ ': ['\\\\'],
- '\\\\\t': ['\\\\'],
- r'\"': ['"'],
- # The following four test cases are official test cases given in
- # Microsoft's documentation.
- r'"abc" d e': [r'abc', r'd', r'e'],
- r'a\\b d"e f"g h': [r'a\\b', r'de fg', r'h'],
- r'a\\\"b c d': [r'a\"b', r'c', r'd'],
- r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
- }
- runner = ShellSplitTestRunner()
- for input_string, expected_output in windows_cases.items():
- yield runner.assert_equal, input_string, expected_output, "win32"
-
- yield runner.assert_raises, r'"', ValueError, "win32"
-
-
-def test_compat_shell_split_unix():
- unix_cases = {
- r'': [],
- r'spam \\': [r'spam', '\\'],
- r'spam ': [r'spam'],
- r' spam': [r'spam'],
- 'spam eggs': [r'spam', r'eggs'],
- 'spam\teggs': [r'spam', r'eggs'],
- 'spam\neggs': ['spam', 'eggs'],
- '""': [''],
- '" "': [' '],
- '"\t"': ['\t'],
- '\\\\': ['\\'],
- '\\\\ ': ['\\'],
- '\\\\\t': ['\\'],
- r'\"': ['"'],
- # The following four test cases are official test cases given in
- # Microsoft's documentation, but adapted to unix shell splitting.
- r'"abc" d e': [r'abc', r'd', r'e'],
- r'a\\b d"e f"g h': [r'a\b', r'de fg', r'h'],
- r'a\\\"b c d': [r'a\"b', r'c', r'd'],
- r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
- }
- runner = ShellSplitTestRunner()
- for input_string, expected_output in unix_cases.items():
- yield runner.assert_equal, input_string, expected_output, "linux2"
- yield runner.assert_equal, input_string, expected_output, "darwin"
-
- yield runner.assert_raises, r'"', ValueError, "linux2"
- yield runner.assert_raises, r'"', ValueError, "darwin"
-
-
-class ShellSplitTestRunner(object):
- def assert_equal(self, s, expected, platform):
- assert_equal(compat_shell_split(s, platform), expected)
-
- def assert_raises(self, s, exception_cls, platform):
- assert_raises(exception_cls, compat_shell_split, s, platform)
+class TestCompatShellSplit(unittest.TestCase):
+ def test_compat_shell_split_windows(self):
+ windows_cases = {
+ r'': [],
+ r'spam \\': [r'spam', '\\\\'],
+ r'spam ': [r'spam'],
+ r' spam': [r'spam'],
+ 'spam eggs': [r'spam', r'eggs'],
+ 'spam\teggs': [r'spam', r'eggs'],
+ 'spam\neggs': ['spam\neggs'],
+ '""': [''],
+ '" "': [' '],
+ '"\t"': ['\t'],
+ '\\\\': ['\\\\'],
+ '\\\\ ': ['\\\\'],
+ '\\\\\t': ['\\\\'],
+ r'\"': ['"'],
+ # The following four test cases are official test cases given in
+ # Microsoft's documentation.
+ r'"abc" d e': [r'abc', r'd', r'e'],
+ r'a\\b d"e f"g h': [r'a\\b', r'de fg', r'h'],
+ r'a\\\"b c d': [r'a\"b', r'c', r'd'],
+ r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
+ }
+ for input_string, expected_output in windows_cases.items():
+ self.assertEqual(compat_shell_split(input_string, "win32"),
+ expected_output)
+
+ with self.assertRaises(ValueError):
+ compat_shell_split(r'"', "win32")
+
+ def test_compat_shell_split_unix(self):
+ unix_cases = {
+ r'': [],
+ r'spam \\': [r'spam', '\\'],
+ r'spam ': [r'spam'],
+ r' spam': [r'spam'],
+ 'spam eggs': [r'spam', r'eggs'],
+ 'spam\teggs': [r'spam', r'eggs'],
+ 'spam\neggs': ['spam', 'eggs'],
+ '""': [''],
+ '" "': [' '],
+ '"\t"': ['\t'],
+ '\\\\': ['\\'],
+ '\\\\ ': ['\\'],
+ '\\\\\t': ['\\'],
+ r'\"': ['"'],
+ # The following four test cases are official test cases given in
+ # Microsoft's documentation, but adapted to unix shell splitting.
+ r'"abc" d e': [r'abc', r'd', r'e'],
+ r'a\\b d"e f"g h': [r'a\b', r'de fg', r'h'],
+ r'a\\\"b c d': [r'a\"b', r'c', r'd'],
+ r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
+ }
+ for input_string, expected_output in unix_cases.items():
+ self.assertEqual(compat_shell_split(input_string, "linux2"),
+ expected_output)
+ self.assertEqual(compat_shell_split(input_string, "darwin"),
+ expected_output)
+
+ with self.assertRaises(ValueError):
+ compat_shell_split(r'"', "linux2")
+ with self.assertRaises(ValueError):
+ compat_shell_split(r'"', "darwin")
class TestTimezoneOperations(unittest.TestCase):
def test_get_tzinfo_options(self):
options = get_tzinfo_options()
- self.assertTrue(len(options) > 0)
+ self.assertGreater(len(options), 0)
for tzinfo in options:
self.assertIsInstance(tzinfo(), datetime.tzinfo)
diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py
index 4f7d07c6..c17a572c 100644
--- a/tests/unit/test_config_provider.py
+++ b/tests/unit/test_config_provider.py
@@ -12,7 +12,6 @@
# language governing permissions and limitations under the License.
from tests import unittest
import mock
-from nose.tools import assert_equal
import botocore
import botocore.session as session
@@ -308,7 +307,7 @@ class TestConfigValueStore(unittest.TestCase):
provider = ConfigValueStore()
provider.set_config_variable('fake_variable', 'foo')
value = provider.get_config_variable('fake_variable')
- self.assertEquals(value, 'foo')
+ self.assertEqual(value, 'foo')
def test_can_set_config_provider(self):
foo_value_provider = mock.Mock(spec=BaseProvider)
@@ -448,7 +447,7 @@ def assert_chain_does_provide(providers, expected_value):
providers=providers,
)
value = provider.provide()
- assert_equal(value, expected_value)
+ assert value == expected_value
def test_chain_provider():
@@ -468,9 +467,9 @@ def test_chain_provider():
('foo', ['foo', 'bar', 'baz']),
]
for case in cases:
- yield assert_chain_does_provide, \
- _make_providers_that_return(case[1]), \
- case[0]
+ assert_chain_does_provide(
+ _make_providers_that_return(case[1]),
+ case[0])
class TestChainProvider(unittest.TestCase):
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index 62bbef93..06fee6f6 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -1083,7 +1083,7 @@ class TestEnvVar(BaseEnvVar):
"Credentials were refreshed, but the refreshed credentials are "
"still expired."
)
- with self.assertRaisesRegexp(RuntimeError, error_message):
+ with six.assertRaisesRegex(self, RuntimeError, error_message):
creds.get_frozen_credentials()
def test_partial_creds_is_an_error(self):
@@ -1149,7 +1149,7 @@ class TestEnvVar(BaseEnvVar):
"Credentials were refreshed, but the refreshed credentials are "
"still expired."
)
- with self.assertRaisesRegexp(RuntimeError, error_message):
+ with six.assertRaisesRegex(self, RuntimeError, error_message):
creds.get_frozen_credentials()
# Now we update the environment with non-expired credentials,
@@ -2746,7 +2746,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=7,
refresh_function=fail_refresh
)
- with self.assertRaisesRegexp(Exception, 'refresh failed'):
+ with six.assertRaisesRegex(self, Exception, 'refresh failed'):
creds.get_frozen_credentials()
def test_exception_propogated_on_expired_credentials(self):
@@ -2759,7 +2759,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=7,
refresh_function=fail_refresh
)
- with self.assertRaisesRegexp(Exception, 'refresh failed'):
+ with six.assertRaisesRegex(self, Exception, 'refresh failed'):
# Because credentials are actually expired, any
# failure to refresh should be propagated.
creds.get_frozen_credentials()
@@ -2780,7 +2780,7 @@ class TestRefreshLogic(unittest.TestCase):
creds_last_for=-2,
)
err_msg = 'refreshed credentials are still expired'
- with self.assertRaisesRegexp(RuntimeError, err_msg):
+ with six.assertRaisesRegex(self, RuntimeError, err_msg):
# Because credentials are actually expired, any
# failure to refresh should be propagated.
creds.get_frozen_credentials()
@@ -3068,7 +3068,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with self.assertRaisesRegexp(exception, 'Error Message'):
+ with six.assertRaisesRegex(self, exception, 'Error Message'):
provider.load()
def test_unsupported_version_raises_mismatch(self):
@@ -3086,7 +3086,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with self.assertRaisesRegexp(exception, 'Unsupported version'):
+ with six.assertRaisesRegex(self, exception, 'Unsupported version'):
provider.load()
def test_missing_version_in_payload_returned_raises_exception(self):
@@ -3103,7 +3103,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with self.assertRaisesRegexp(exception, 'Unsupported version'):
+ with six.assertRaisesRegex(self, exception, 'Unsupported version'):
provider.load()
def test_missing_access_key_raises_exception(self):
@@ -3120,7 +3120,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with self.assertRaisesRegexp(exception, 'Missing required key'):
+ with six.assertRaisesRegex(self, exception, 'Missing required key'):
provider.load()
def test_missing_secret_key_raises_exception(self):
@@ -3137,7 +3137,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with self.assertRaisesRegexp(exception, 'Missing required key'):
+ with six.assertRaisesRegex(self, exception, 'Missing required key'):
provider.load()
def test_missing_session_token(self):
diff --git a/tests/unit/test_discovery.py b/tests/unit/test_discovery.py
index 92b7d14f..d6196197 100644
--- a/tests/unit/test_discovery.py
+++ b/tests/unit/test_discovery.py
@@ -1,5 +1,8 @@
import time
-from mock import Mock, call
+try:
+ from mock import Mock, call
+except ImportError:
+ from unittest.mock import Mock, call
from tests import unittest
from botocore.awsrequest import AWSRequest
diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py
index c979b367..2b6d896e 100644
--- a/tests/unit/test_endpoint.py
+++ b/tests/unit/test_endpoint.py
@@ -13,7 +13,10 @@
import socket
from tests import unittest
-from mock import Mock, patch, sentinel
+try:
+ from mock import Mock, patch, sentinel
+except ImportError:
+ from unittest.mock import Mock, patch, sentinel
from botocore.compat import six
from botocore.awsrequest import AWSRequest
diff --git a/tests/unit/test_errorfactory.py b/tests/unit/test_errorfactory.py
index 5b6f6a00..f8c78dbb 100644
--- a/tests/unit/test_errorfactory.py
+++ b/tests/unit/test_errorfactory.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest
+from botocore.compat import six
from botocore.exceptions import ClientError
from botocore.errorfactory import BaseClientExceptions
from botocore.errorfactory import ClientExceptionsFactory
@@ -39,7 +40,7 @@ class TestBaseClientExceptions(unittest.TestCase):
def test_gettattr_message(self):
exception_cls = type('MyException', (ClientError,), {})
self.code_to_exception['MyExceptionCode'] = exception_cls
- with self.assertRaisesRegexp(
+ with six.assertRaisesRegex(self,
AttributeError, 'Valid exceptions are: MyException'):
self.exceptions.SomeUnmodeledError
diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py
index 7b921980..07a77cf8 100644
--- a/tests/unit/test_eventstream.py
+++ b/tests/unit/test_eventstream.py
@@ -12,8 +12,10 @@
# language governing permissions and limitations under the License.
"""Unit tests for the binary event stream decoder. """
-from mock import Mock
-from nose.tools import assert_equal, raises
+try:
+ from mock import Mock
+except ImportError:
+ from unittest.mock import Mock
from botocore.parsers import EventStreamXMLParser
from botocore.eventstream import (
@@ -240,18 +242,12 @@ NEGATIVE_CASES = [
def assert_message_equal(message_a, message_b):
"""Asserts all fields for two messages are equal. """
- assert_equal(
- message_a.prelude.total_length,
- message_b.prelude.total_length
- )
- assert_equal(
- message_a.prelude.headers_length,
- message_b.prelude.headers_length
- )
- assert_equal(message_a.prelude.crc, message_b.prelude.crc)
- assert_equal(message_a.headers, message_b.headers)
- assert_equal(message_a.payload, message_b.payload)
- assert_equal(message_a.crc, message_b.crc)
+ assert message_a.prelude.total_length == message_b.prelude.total_length
+ assert message_a.prelude.headers_length == message_b.prelude.headers_length
+ assert message_a.prelude.crc == message_b.prelude.crc
+ assert message_a.headers == message_b.headers
+ assert message_a.payload == message_b.payload
+ assert message_a.crc == message_b.crc
def test_partial_message():
@@ -262,7 +258,7 @@ def test_partial_message():
mid_point = 15
event_buffer.add_data(data[:mid_point])
messages = list(event_buffer)
- assert_equal(messages, [])
+ assert messages == []
event_buffer.add_data(data[mid_point:len(data)])
for message in event_buffer:
assert_message_equal(message, EMPTY_MESSAGE[1])
@@ -280,7 +276,7 @@ def check_message_decodes(encoded, decoded):
def test_positive_cases():
"""Test that all positive cases decode how we expect. """
for (encoded, decoded) in POSITIVE_CASES:
- yield check_message_decodes, encoded, decoded
+ check_message_decodes(encoded, decoded)
def test_all_positive_cases():
@@ -301,8 +297,13 @@ def test_all_positive_cases():
def test_negative_cases():
"""Test that all negative cases raise the expected exception. """
for (encoded, exception) in NEGATIVE_CASES:
- test_function = raises(exception)(check_message_decodes)
- yield test_function, encoded, None
+ try:
+ check_message_decodes(encoded, None)
+ except exception:
+ pass
+ else:
+ raise AssertionError(
+ 'Expected exception {!s} has not been raised.'.format(exception))
def test_header_parser():
@@ -329,87 +330,87 @@ def test_header_parser():
parser = EventStreamHeaderParser()
headers = parser.parse(headers_data)
- assert_equal(headers, expected_headers)
+ assert headers == expected_headers
def test_message_prelude_properties():
"""Test that calculated properties from the payload are correct. """
# Total length: 40, Headers Length: 15, random crc
prelude = MessagePrelude(40, 15, 0x00000000)
- assert_equal(prelude.payload_length, 9)
- assert_equal(prelude.headers_end, 27)
- assert_equal(prelude.payload_end, 36)
+ assert prelude.payload_length == 9
+ assert prelude.headers_end == 27
+ assert prelude.payload_end == 36
def test_message_to_response_dict():
response_dict = PAYLOAD_ONE_STR_HEADER[1].to_response_dict()
- assert_equal(response_dict['status_code'], 200)
+ assert response_dict['status_code'] == 200
expected_headers = {'content-type': 'application/json'}
- assert_equal(response_dict['headers'], expected_headers)
- assert_equal(response_dict['body'], b"{'foo':'bar'}")
+ assert response_dict['headers'] == expected_headers
+ assert response_dict['body'] == b"{'foo':'bar'}"
def test_message_to_response_dict_error():
response_dict = ERROR_EVENT_MESSAGE[1].to_response_dict()
- assert_equal(response_dict['status_code'], 400)
+ assert response_dict['status_code'] == 400
headers = {
':message-type': 'error',
':error-code': 'code',
':error-message': 'message',
}
- assert_equal(response_dict['headers'], headers)
- assert_equal(response_dict['body'], b'')
+ assert response_dict['headers'] == headers
+ assert response_dict['body'] == b''
def test_unpack_uint8():
(value, bytes_consumed) = DecodeUtils.unpack_uint8(b'\xDE')
- assert_equal(bytes_consumed, 1)
- assert_equal(value, 0xDE)
+ assert bytes_consumed == 1
+ assert value == 0xDE
def test_unpack_uint32():
(value, bytes_consumed) = DecodeUtils.unpack_uint32(b'\xDE\xAD\xBE\xEF')
- assert_equal(bytes_consumed, 4)
- assert_equal(value, 0xDEADBEEF)
+ assert bytes_consumed == 4
+ assert value == 0xDEADBEEF
def test_unpack_int8():
(value, bytes_consumed) = DecodeUtils.unpack_int8(b'\xFE')
- assert_equal(bytes_consumed, 1)
- assert_equal(value, -2)
+ assert bytes_consumed == 1
+ assert value == -2
def test_unpack_int16():
(value, bytes_consumed) = DecodeUtils.unpack_int16(b'\xFF\xFE')
- assert_equal(bytes_consumed, 2)
- assert_equal(value, -2)
+ assert bytes_consumed == 2
+ assert value == -2
def test_unpack_int32():
(value, bytes_consumed) = DecodeUtils.unpack_int32(b'\xFF\xFF\xFF\xFE')
- assert_equal(bytes_consumed, 4)
- assert_equal(value, -2)
+ assert bytes_consumed == 4
+ assert value == -2
def test_unpack_int64():
test_bytes = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE'
(value, bytes_consumed) = DecodeUtils.unpack_int64(test_bytes)
- assert_equal(bytes_consumed, 8)
- assert_equal(value, -2)
+ assert bytes_consumed == 8
+ assert value == -2
def test_unpack_array_short():
test_bytes = b'\x00\x10application/json'
(value, bytes_consumed) = DecodeUtils.unpack_byte_array(test_bytes)
- assert_equal(bytes_consumed, 18)
- assert_equal(value, b'application/json')
+ assert bytes_consumed == 18
+ assert value == b'application/json'
def test_unpack_byte_array_int():
(value, array_bytes_consumed) = DecodeUtils.unpack_byte_array(
b'\x00\x00\x00\x10application/json', length_byte_size=4)
- assert_equal(array_bytes_consumed, 20)
- assert_equal(value, b'application/json')
+ assert array_bytes_consumed == 20
+ assert value == b'application/json'
def test_unpack_utf8_string():
@@ -417,18 +418,19 @@ def test_unpack_utf8_string():
utf8_string = b'\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e'
encoded = length + utf8_string
(value, bytes_consumed) = DecodeUtils.unpack_utf8_string(encoded)
- assert_equal(bytes_consumed, 11)
- assert_equal(value, utf8_string.decode('utf-8'))
+ assert bytes_consumed == 11
+ assert value == utf8_string.decode('utf-8')
def test_unpack_prelude():
data = b'\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03'
prelude = DecodeUtils.unpack_prelude(data)
- assert_equal(prelude, ((1, 2, 3), 12))
+ assert prelude == ((1, 2, 3), 12)
def create_mock_raw_stream(*data):
raw_stream = Mock()
+
def generator():
for chunk in data:
yield chunk
@@ -445,7 +447,7 @@ def test_event_stream_wrapper_iteration():
output_shape = Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
events = list(event_stream)
- assert_equal(len(events), 1)
+ assert len(events) == 1
response_dict = {
'headers': {'event-id': 0x0000a00c},
@@ -455,14 +457,19 @@ def test_event_stream_wrapper_iteration():
parser.parse.assert_called_with(response_dict, output_shape)
-@raises(EventStreamError)
def test_eventstream_wrapper_iteration_error():
- raw_stream = create_mock_raw_stream(ERROR_EVENT_MESSAGE[0])
- parser = Mock(spec=EventStreamXMLParser)
- parser.parse.return_value = {}
- output_shape = Mock()
- event_stream = EventStream(raw_stream, output_shape, parser, '')
- list(event_stream)
+ try:
+ raw_stream = create_mock_raw_stream(ERROR_EVENT_MESSAGE[0])
+ parser = Mock(spec=EventStreamXMLParser)
+ parser.parse.return_value = {}
+ output_shape = Mock()
+ event_stream = EventStream(raw_stream, output_shape, parser, '')
+ list(event_stream)
+ except EventStreamError:
+ pass
+ else:
+ raise AssertionError(
+ 'Expected exception EventStreamError has not been raised.')
def test_event_stream_wrapper_close():
@@ -492,22 +499,32 @@ def test_event_stream_initial_response():
assert event.payload == payload
-@raises(NoInitialResponseError)
def test_event_stream_initial_response_wrong_type():
- raw_stream = create_mock_raw_stream(
- b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00",
- b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85",
- )
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
- event_stream = EventStream(raw_stream, output_shape, parser, '')
- event_stream.get_initial_response()
+ try:
+ raw_stream = create_mock_raw_stream(
+ b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00",
+ b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85",
+ )
+ parser = Mock(spec=EventStreamXMLParser)
+ output_shape = Mock()
+ event_stream = EventStream(raw_stream, output_shape, parser, '')
+ event_stream.get_initial_response()
+ except NoInitialResponseError:
+ pass
+ else:
+ raise AssertionError(
+ 'Expected exception NoInitialResponseError has not been raised.')
-@raises(NoInitialResponseError)
def test_event_stream_initial_response_no_event():
- raw_stream = create_mock_raw_stream(b'')
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
- event_stream = EventStream(raw_stream, output_shape, parser, '')
- event_stream.get_initial_response()
+ try:
+ raw_stream = create_mock_raw_stream(b'')
+ parser = Mock(spec=EventStreamXMLParser)
+ output_shape = Mock()
+ event_stream = EventStream(raw_stream, output_shape, parser, '')
+ event_stream.get_initial_response()
+ except NoInitialResponseError:
+ pass
+ else:
+ raise AssertionError(
+ 'Expected exception NoInitialResponseError has not been raised.')
diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
index e147697a..a81fbf85 100644
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -14,8 +14,6 @@
import pickle
from tests import unittest
-from nose.tools import assert_equal
-
import botocore.awsrequest
import botocore.session
from botocore import exceptions
@@ -24,7 +22,7 @@ from botocore import exceptions
def test_client_error_can_handle_missing_code_or_message():
response = {'Error': {}}
expect = 'An error occurred (Unknown) when calling the blackhole operation: Unknown'
- assert_equal(str(exceptions.ClientError(response, 'blackhole')), expect)
+ assert str(exceptions.ClientError(response, 'blackhole')) == expect
def test_client_error_has_operation_name_set():
@@ -36,7 +34,7 @@ def test_client_error_has_operation_name_set():
def test_client_error_set_correct_operation_name():
response = {'Error': {}}
exception = exceptions.ClientError(response, 'blackhole')
- assert_equal(exception.operation_name, 'blackhole')
+ assert exception.operation_name == 'blackhole'
def test_retry_info_added_when_present():
diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py
index fde22e34..f0fe8a96 100644
--- a/tests/unit/test_handlers.py
+++ b/tests/unit/test_handlers.py
@@ -126,7 +126,7 @@ class TestHandlers(BaseSessionTest):
'foo/keyname%2B?versionId=asdf+')
def test_copy_source_has_validation_failure(self):
- with self.assertRaisesRegexp(ParamValidationError, 'Key'):
+ with six.assertRaisesRegex(self, ParamValidationError, 'Key'):
handlers.handle_copy_source_param(
{'CopySource': {'Bucket': 'foo'}})
diff --git a/tests/unit/test_http_client_exception_mapping.py b/tests/unit/test_http_client_exception_mapping.py
index 9eee38c4..2bcffac9 100644
--- a/tests/unit/test_http_client_exception_mapping.py
+++ b/tests/unit/test_http_client_exception_mapping.py
@@ -1,4 +1,4 @@
-from nose.tools import assert_raises
+import unittest
from botocore import exceptions as botocore_exceptions
from botocore.vendored.requests import exceptions as requests_exceptions
@@ -13,15 +13,9 @@ EXCEPTION_MAPPING = [
]
-def _raise_exception(exception):
- raise exception(endpoint_url=None, proxy_url=None, error=None)
-
-
-def _test_exception_mapping(new_exception, old_exception):
- # assert that the new exception can still be caught by the old vendored one
- assert_raises(old_exception, _raise_exception, new_exception)
-
-
-def test_http_client_exception_mapping():
- for new_exception, old_exception in EXCEPTION_MAPPING:
- yield _test_exception_mapping, new_exception, old_exception
+class TestHttpClientExceptionMapping(unittest.TestCase):
+ def test_http_client_exception_mapping(self):
+ for new_exception, old_exception in EXCEPTION_MAPPING:
+ with self.assertRaises(old_exception):
+ raise new_exception(endpoint_url=None, proxy_url=None,
+ error=None)
diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py
index 18a15247..da84c94b 100644
--- a/tests/unit/test_http_session.py
+++ b/tests/unit/test_http_session.py
@@ -1,8 +1,10 @@
import socket
-from mock import patch, Mock, ANY
+try:
+ from mock import patch, Mock, ANY
+except ImportError:
+ from unittest.mock import patch, Mock, ANY
from tests import unittest
-from nose.tools import raises
from urllib3.exceptions import NewConnectionError, ProtocolError
from botocore.vendored import six
@@ -250,15 +252,15 @@ class TestURLLib3Session(unittest.TestCase):
session = URLLib3Session()
session.send(self.request.prepare())
- @raises(EndpointConnectionError)
def test_catches_new_connection_error(self):
- error = NewConnectionError(None, None)
- self.make_request_with_error(error)
+ with self.assertRaises(EndpointConnectionError):
+ error = NewConnectionError(None, None)
+ self.make_request_with_error(error)
- @raises(ConnectionClosedError)
def test_catches_bad_status_line(self):
- error = ProtocolError(None)
- self.make_request_with_error(error)
+ with self.assertRaises(ConnectionClosedError):
+ error = ProtocolError(None)
+ self.make_request_with_error(error)
def test_aws_connection_classes_are_used(self):
session = URLLib3Session()
diff --git a/tests/unit/test_loaders.py b/tests/unit/test_loaders.py
index c9e8a5b5..ff25066f 100644
--- a/tests/unit/test_loaders.py
+++ b/tests/unit/test_loaders.py
@@ -28,6 +28,7 @@ from botocore.exceptions import DataNotFoundError, UnknownServiceError
from botocore.loaders import JSONFileLoader
from botocore.loaders import Loader, create_loader
from botocore.loaders import ExtrasProcessor
+from botocore.compat import six
from tests import BaseEnvVar
@@ -156,8 +157,8 @@ class TestLoader(BaseEnvVar):
# Should have a) the unknown service name and b) list of valid
# service names.
- with self.assertRaisesRegexp(UnknownServiceError,
- 'Unknown service.*BAZ.*baz'):
+ with six.assertRaisesRegex(self, UnknownServiceError,
+ 'Unknown service.*BAZ.*baz'):
loader.load_service_model('BAZ', type_name='service-2')
def test_load_service_model_uses_provided_type_name(self):
@@ -169,8 +170,8 @@ class TestLoader(BaseEnvVar):
# Should have a) the unknown service name and b) list of valid
# service names.
provided_type_name = 'not-service-2'
- with self.assertRaisesRegexp(UnknownServiceError,
- 'Unknown service.*BAZ.*baz'):
+ with six.assertRaisesRegex(self, UnknownServiceError,
+ 'Unknown service.*BAZ.*baz'):
loader.load_service_model(
'BAZ', type_name=provided_type_name)
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 7c2a672f..fe1511de 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -2,11 +2,11 @@ from tests import unittest
from botocore import model
from botocore.compat import OrderedDict
-from botocore.exceptions import MissingServiceIdError
+from botocore.compat import six
def test_missing_model_attribute_raises_exception():
- # We're using a nose test generator here to cut down
+ # We're calling the same check in a loop here to cut down
# on the duplication. The property names below
# all have the same test logic.
service_model = model.ServiceModel({'metadata': {'endpointPrefix': 'foo'}})
@@ -28,7 +28,7 @@ def test_missing_model_attribute_raises_exception():
"be raised, but no exception was raised for: %s" % attr_name)
for name in property_names:
- yield _test_attribute_raise_exception, name
+ _test_attribute_raise_exception(name)
class TestServiceId(unittest.TestCase):
@@ -105,9 +105,9 @@ class TestServiceModel(unittest.TestCase):
}
service_name = 'myservice'
service_model = model.ServiceModel(service_model, service_name)
- with self.assertRaisesRegexp(model.UndefinedModelAttributeError,
- service_name):
- service_model.service_id
+ with six.assertRaisesRegex(self, model.UndefinedModelAttributeError,
+ service_name):
+ service_model.service_id()
def test_operation_does_not_exist(self):
with self.assertRaises(model.OperationNotFoundError):
diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py
index bf8666b4..cc7d300d 100644
--- a/tests/unit/test_paginate.py
+++ b/tests/unit/test_paginate.py
@@ -823,7 +823,7 @@ class TestKeyIterators(unittest.TestCase):
{"Users": ["User3"]},
]
self.method.side_effect = responses
- with self.assertRaisesRegexp(ValueError, 'Bad starting token'):
+ with six.assertRaisesRegex(self, ValueError, 'Bad starting token'):
pagination_config = {'StartingToken': 'does___not___work'}
self.paginator.paginate(
PaginationConfig=pagination_config).build_full_result()
diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py
index fdb24eef..ad813641 100644
--- a/tests/unit/test_parsers.py
+++ b/tests/unit/test_parsers.py
@@ -14,11 +14,11 @@ from tests import unittest, RawResponse
import datetime
from dateutil.tz import tzutc
-from nose.tools import assert_equal
from botocore import parsers
from botocore import model
from botocore.compat import json, MutableMapping
+from botocore.compat import six
# HTTP responses will typically return a custom HTTP
@@ -597,8 +597,8 @@ class TestHandlesInvalidXMLResponses(unittest.TestCase):
parser = parsers.QueryParser()
output_shape = None
# The XML body should be in the error message.
- with self.assertRaisesRegexp(parsers.ResponseParserError,
- '<DeleteTagsResponse'):
+ with six.assertRaisesRegex(self, parsers.ResponseParserError,
+ '<DeleteTagsResponse'):
parser.parse(
{'body': invalid_xml, 'headers': {}, 'status_code': 200},
output_shape)
@@ -1310,9 +1310,9 @@ def test_can_handle_generic_error_message():
).encode('utf-8')
empty_body = b''
none_body = None
- yield _assert_parses_generic_error, parser_cls(), generic_html_body
- yield _assert_parses_generic_error, parser_cls(), empty_body
- yield _assert_parses_generic_error, parser_cls(), none_body
+ _assert_parses_generic_error(parser_cls(), generic_html_body)
+ _assert_parses_generic_error(parser_cls(), empty_body)
+ _assert_parses_generic_error(parser_cls(), none_body)
def _assert_parses_generic_error(parser, body):
@@ -1320,7 +1320,6 @@ def _assert_parses_generic_error(parser, body):
# html error page. We should be able to handle this case.
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 503}, None)
- assert_equal(
- parsed['Error'],
- {'Code': '503', 'Message': 'Service Unavailable'})
- assert_equal(parsed['ResponseMetadata']['HTTPStatusCode'], 503)
+ assert parsed['Error'] == \
+ {'Code': '503', 'Message': 'Service Unavailable'}
+ assert parsed['ResponseMetadata']['HTTPStatusCode'] == 503
diff --git a/tests/unit/test_protocols.py b/tests/unit/test_protocols.py
index f2c65a43..b6a7ca0e 100644
--- a/tests/unit/test_protocols.py
+++ b/tests/unit/test_protocols.py
@@ -16,7 +16,7 @@
This is a test runner for all the JSON tests defined in
``tests/unit/protocols/``, including both the input/output tests.
-You can use the normal ``nosetests tests/unit/test_protocols.py`` to run
+You can use the normal ``pytest tests/unit/test_protocols.py`` to run
this test. In addition, there are several env vars you can use during
development.
@@ -37,17 +37,17 @@ failed test.
To run tests from only a single file, you can set the
BOTOCORE_TEST env var::
- BOTOCORE_TEST=tests/unit/compliance/input/json.json nosetests tests/unit/test_protocols.py
+ BOTOCORE_TEST=tests/unit/compliance/input/json.json pytest tests/unit/test_protocols.py
To run a single test suite you can set the BOTOCORE_TEST_ID env var:
BOTOCORE_TEST=tests/unit/compliance/input/json.json BOTOCORE_TEST_ID=5 \
- nosetests tests/unit/test_protocols.py
+ pytest tests/unit/test_protocols.py
To run a single test case in a suite (useful when debugging a single test), you
can set the BOTOCORE_TEST_ID env var with the ``suite_id:test_id`` syntax.
- BOTOCORE_TEST_ID=5:1 nosetests test/unit/test_protocols.py
+ BOTOCORE_TEST_ID=5:1 pytest tests/unit/test_protocols.py
"""
import os
@@ -69,8 +69,6 @@ from botocore.awsrequest import prepare_request_dict
from calendar import timegm
from botocore.model import NoShapeFoundError
-from nose.tools import assert_equal as _assert_equal
-
TEST_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'protocols')
@@ -101,9 +99,9 @@ def test_compliance():
if model.get('description') in PROTOCOL_TEST_BLACKLIST:
continue
if 'params' in case:
- yield _test_input, model, case, basename
+ _test_input(model, case, basename)
elif 'response' in case:
- yield _test_output, model, case, basename
+ _test_output(model, case, basename)
def _test_input(json_description, case, basename):
@@ -142,7 +140,7 @@ def _assert_endpoints_equal(actual, expected, endpoint):
return
prepare_request_dict(actual, endpoint)
actual_host = urlsplit(actual['url']).netloc
- assert_equal(actual_host, expected['host'], 'Host')
+ rich_assert_equal(actual_host, expected['host'], 'Host')
class MockRawResponse(object):
@@ -208,7 +206,7 @@ def _test_output(json_description, case, basename):
expected_result.update(case['error'])
else:
expected_result = case['result']
- assert_equal(parsed, expected_result, "Body")
+ rich_assert_equal(parsed, expected_result, "Body")
except Exception as e:
_output_failure_message(model.metadata['protocol'],
case, parsed, expected_result, e)
@@ -318,11 +316,11 @@ def _try_json_dump(obj):
return str(obj)
-def assert_equal(first, second, prefix):
+def rich_assert_equal(first, second, prefix):
# A better assert equals. It allows you to just provide
# prefix instead of the entire message.
try:
- _assert_equal(first, second)
+ assert first == second
except Exception:
try:
better = "%s (actual != expected)\n%s !=\n%s" % (
@@ -353,14 +351,14 @@ def _serialize_request_description(request_dict):
def _assert_requests_equal(actual, expected):
- assert_equal(actual['body'], expected.get('body', '').encode('utf-8'),
+ rich_assert_equal(actual['body'], expected.get('body', '').encode('utf-8'),
'Body value')
actual_headers = dict(actual['headers'])
expected_headers = expected.get('headers', {})
- assert_equal(actual_headers, expected_headers, "Header values")
- assert_equal(actual['url_path'], expected.get('uri', ''), "URI")
+ rich_assert_equal(actual_headers, expected_headers, "Header values")
+ rich_assert_equal(actual['url_path'], expected.get('uri', ''), "URI")
if 'method' in expected:
- assert_equal(actual['method'], expected['method'], "Method")
+ rich_assert_equal(actual['method'], expected['method'], "Method")
def _walk_files():
diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py
index f828044f..ee2768fd 100644
--- a/tests/unit/test_s3_addressing.py
+++ b/tests/unit/test_s3_addressing.py
@@ -16,9 +16,13 @@
import os
from tests import BaseSessionTest, ClientHTTPStubber
-from mock import patch, Mock
+try:
+ from mock import patch, Mock
+except ImportError:
+ from unittest.mock import patch, Mock
from botocore.compat import OrderedDict
+from botocore.compat import six
from botocore.handlers import set_list_objects_encoding_type_url
@@ -198,7 +202,7 @@ class TestS3Addressing(BaseSessionTest):
'https://s3.us-west-2.amazonaws.com/192.168.5.256/mykeyname')
def test_invalid_endpoint_raises_exception(self):
- with self.assertRaisesRegexp(ValueError, 'Invalid region'):
+ with six.assertRaisesRegex(self, ValueError, 'Invalid region'):
self.session.create_client('s3', 'Invalid region')
def test_non_existent_region(self):
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index d55f03f8..58360e04 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -2101,7 +2101,7 @@ class TestContainerMetadataFetcher(unittest.TestCase):
response_body = {'foo': 'bar'}
self.set_http_responses_to(response_body)
fetcher = self.create_fetcher()
- with self.assertRaisesRegexp(ValueError, 'Unsupported host'):
+ with six.assertRaisesRegex(self, ValueError, 'Unsupported host'):
fetcher.retrieve_full_uri(full_uri)
self.assertFalse(self.http.send.called)
diff --git a/tests/unit/test_waiters.py b/tests/unit/test_waiters.py
index 9637c05a..c82063a1 100644
--- a/tests/unit/test_waiters.py
+++ b/tests/unit/test_waiters.py
@@ -389,7 +389,7 @@ class TestWaitersObjects(unittest.TestCase):
)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaisesRegexp(WaiterError, error_message):
+ with six.assertRaisesRegex(self, WaiterError, error_message):
waiter.wait()
def _assert_failure_state_error_raised(self, acceptors, responses, expected_msg):
--
2.29.2
From 458bfd2118a700c8ce5871304e107d0f5dae19c6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= <mcepl@cepl.eu>
Date: Wed, 26 Aug 2020 20:28:35 +0200
Subject: [PATCH 02/14] Cleanup
(Fix scripts for running tests in CI.)
Don't import the standalone mock package when unittest.mock is available.
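A minimal sketch of that fallback, assuming only that the third-party backport is still wanted on Python 2 (it mirrors the tests/__init__.py hunk below):

    try:
        # Python 2: the standalone mock backport is required.
        import mock
    except ImportError:
        # Python 3: unittest.mock ships with the standard library.
        from unittest import mock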
---
(.gitignore | 2 +-)
(scripts/ci/run-integ-tests | 3 +--)
(scripts/ci/run-tests | 4 +---)
tests/__init__.py | 5 ++++-
tests/functional/csm/test_monitoring.py | 2 +-
tests/functional/test_apigateway.py | 2 +-
tests/functional/test_cloudsearchdomain.py | 2 +-
tests/functional/test_cognito_idp.py | 2 +-
tests/functional/test_credentials.py | 2 +-
tests/functional/test_docdb.py | 2 +-
tests/functional/test_ec2.py | 2 +-
tests/functional/test_history.py | 2 +-
tests/functional/test_lex.py | 2 +-
tests/functional/test_machinelearning.py | 2 +-
tests/functional/test_neptune.py | 2 +-
tests/functional/test_public_apis.py | 2 +-
tests/functional/test_rds.py | 2 +-
tests/functional/test_regions.py | 2 +-
tests/functional/test_session.py | 2 +-
tests/functional/test_sts.py | 2 +-
tests/integration/test_credentials.py | 2 +-
tests/integration/test_loaders.py | 2 +-
tests/unit/auth/test_signers.py | 2 +-
tests/unit/auth/test_sigv4.py | 2 +-
tests/unit/docs/__init__.py | 2 +-
tests/unit/docs/bcdoc/test_docstringparser.py | 2 +-
tests/unit/docs/test_docs.py | 2 +-
tests/unit/docs/test_example.py | 2 +-
tests/unit/docs/test_params.py | 2 +-
tests/unit/docs/test_service.py | 2 +-
tests/unit/retries/test_adaptive.py | 2 +-
tests/unit/retries/test_special.py | 2 +-
tests/unit/retries/test_standard.py | 2 +-
tests/unit/test_args.py | 2 +-
tests/unit/test_client.py | 2 +-
tests/unit/test_compat.py | 2 +-
tests/unit/test_config_provider.py | 2 +-
tests/unit/test_configloader.py | 2 +-
tests/unit/test_credentials.py | 2 +-
tests/unit/test_handlers.py | 2 +-
tests/unit/test_history.py | 2 +-
tests/unit/test_idempotency.py | 2 +-
tests/unit/test_loaders.py | 2 +-
tests/unit/test_paginate.py | 2 +-
tests/unit/test_retryhandler.py | 2 +-
tests/unit/test_session.py | 2 +-
tests/unit/test_session_legacy.py | 2 +-
tests/unit/test_signers.py | 2 +-
tests/unit/test_stub.py | 2 +-
tests/unit/test_utils.py | 2 +-
tests/unit/test_waiters.py | 2 +-
51 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/tests/__init__.py b/tests/__init__.py
index 046bdb2b..71d9f04e 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -13,7 +13,10 @@
import os
import sys
-import mock
+try:
+ import mock
+except ImportError:
+ from unittest import mock
import time
import random
import shutil
diff --git a/tests/functional/csm/test_monitoring.py b/tests/functional/csm/test_monitoring.py
index b94dc996..53ab1369 100644
--- a/tests/functional/csm/test_monitoring.py
+++ b/tests/functional/csm/test_monitoring.py
@@ -18,7 +18,7 @@ import os
import socket
import threading
-import mock
+from tests import mock
from tests import temporary_file
from tests import ClientHTTPStubber
diff --git a/tests/functional/test_apigateway.py b/tests/functional/test_apigateway.py
index 5422585b..547ce1e3 100644
--- a/tests/functional/test_apigateway.py
+++ b/tests/functional/test_apigateway.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_cloudsearchdomain.py b/tests/functional/test_cloudsearchdomain.py
index a94bb7c6..1933644f 100644
--- a/tests/functional/test_cloudsearchdomain.py
+++ b/tests/functional/test_cloudsearchdomain.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py
index 7f919d3a..7c216237 100644
--- a/tests/functional/test_cognito_idp.py
+++ b/tests/functional/test_cognito_idp.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests import create_session, ClientHTTPStubber
diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py
index a8d7bacc..5d032baf 100644
--- a/tests/functional/test_credentials.py
+++ b/tests/functional/test_credentials.py
@@ -15,7 +15,7 @@ import threading
import os
import math
import time
-import mock
+from tests import mock
import tempfile
import shutil
from datetime import datetime, timedelta
diff --git a/tests/functional/test_docdb.py b/tests/functional/test_docdb.py
index 3f0ebfac..0c35d408 100644
--- a/tests/functional/test_docdb.py
+++ b/tests/functional/test_docdb.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from contextlib import contextmanager
import botocore.session
diff --git a/tests/functional/test_ec2.py b/tests/functional/test_ec2.py
index 795094e9..50728e73 100644
--- a/tests/functional/test_ec2.py
+++ b/tests/functional/test_ec2.py
@@ -11,7 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
-import mock
+from tests import mock
from tests import unittest, ClientHTTPStubber, BaseSessionTest
from botocore.compat import parse_qs, urlparse
diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py
index d3d46f4a..ae0f1654 100644
--- a/tests/functional/test_history.py
+++ b/tests/functional/test_history.py
@@ -1,6 +1,6 @@
from contextlib import contextmanager
-import mock
+from tests import mock
from tests import BaseSessionTest, ClientHTTPStubber
from botocore.history import BaseHistoryHandler
diff --git a/tests/functional/test_lex.py b/tests/functional/test_lex.py
index 88ab4649..ff152427 100644
--- a/tests/functional/test_lex.py
+++ b/tests/functional/test_lex.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from datetime import datetime
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_machinelearning.py b/tests/functional/test_machinelearning.py
index 107fb198..51fc3fae 100644
--- a/tests/functional/test_machinelearning.py
+++ b/tests/functional/test_machinelearning.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_neptune.py b/tests/functional/test_neptune.py
index 187797b7..aafbdd4b 100644
--- a/tests/functional/test_neptune.py
+++ b/tests/functional/test_neptune.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from contextlib import contextmanager
import botocore.session
diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py
index 7e7cbf33..b9ad5948 100644
--- a/tests/functional/test_public_apis.py
+++ b/tests/functional/test_public_apis.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
from collections import defaultdict
-import mock
+from tests import mock
from tests import ClientHTTPStubber
from botocore.session import Session
diff --git a/tests/functional/test_rds.py b/tests/functional/test_rds.py
index ca8b574e..8bf85d5e 100644
--- a/tests/functional/test_rds.py
+++ b/tests/functional/test_rds.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from contextlib import contextmanager
import botocore.session
diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py
index 42277c2d..e117f356 100644
--- a/tests/functional/test_regions.py
+++ b/tests/functional/test_regions.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
from tests import create_session, unittest
-import mock
+from tests import mock
from botocore.client import ClientEndpointBridge
from botocore.exceptions import NoRegionError
diff --git a/tests/functional/test_session.py b/tests/functional/test_session.py
index 2019c9ec..7eb67de5 100644
--- a/tests/functional/test_session.py
+++ b/tests/functional/test_session.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest, temporary_file
-import mock
+from tests import mock
import botocore.session
from botocore.exceptions import ProfileNotFound
diff --git a/tests/functional/test_sts.py b/tests/functional/test_sts.py
index d54d682c..8c7b55d7 100644
--- a/tests/functional/test_sts.py
+++ b/tests/functional/test_sts.py
@@ -13,7 +13,7 @@
from datetime import datetime
import re
-import mock
+from tests import mock
from tests import BaseSessionTest
from tests import temporary_file
diff --git a/tests/integration/test_credentials.py b/tests/integration/test_credentials.py
index 3f55ceec..1f7f4532 100644
--- a/tests/integration/test_credentials.py
+++ b/tests/integration/test_credentials.py
@@ -11,7 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
-import mock
+from tests import mock
import tempfile
import shutil
import json
diff --git a/tests/integration/test_loaders.py b/tests/integration/test_loaders.py
index 051a7f6a..33804fd8 100644
--- a/tests/integration/test_loaders.py
+++ b/tests/integration/test_loaders.py
@@ -13,7 +13,7 @@
import os
from tests import unittest
-import mock
+from tests import mock
import botocore.session
diff --git a/tests/unit/auth/test_signers.py b/tests/unit/auth/test_signers.py
index 18a3644d..77c41978 100644
--- a/tests/unit/auth/test_signers.py
+++ b/tests/unit/auth/test_signers.py
@@ -18,7 +18,7 @@ import time
import base64
import json
-import mock
+from tests import mock
import botocore.auth
import botocore.credentials
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index fa711192..a4e35aea 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -27,7 +27,7 @@ import io
import datetime
from botocore.compat import six
-import mock
+from tests import mock
import botocore.auth
from botocore.awsrequest import AWSRequest
diff --git a/tests/unit/docs/__init__.py b/tests/unit/docs/__init__.py
index e34f22c9..16625c1c 100644
--- a/tests/unit/docs/__init__.py
+++ b/tests/unit/docs/__init__.py
@@ -16,7 +16,7 @@ import tempfile
import shutil
from botocore.docs.bcdoc.restdoc import DocumentStructure
-import mock
+from tests import mock
from tests import unittest
from botocore.compat import OrderedDict
diff --git a/tests/unit/docs/bcdoc/test_docstringparser.py b/tests/unit/docs/bcdoc/test_docstringparser.py
index 23acdea1..e61b070c 100644
--- a/tests/unit/docs/bcdoc/test_docstringparser.py
+++ b/tests/unit/docs/bcdoc/test_docstringparser.py
@@ -18,7 +18,7 @@
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
-import mock
+from tests import mock
from tests import unittest
import botocore.docs.bcdoc.docstringparser as parser
diff --git a/tests/unit/docs/test_docs.py b/tests/unit/docs/test_docs.py
index e191e05c..3582f344 100644
--- a/tests/unit/docs/test_docs.py
+++ b/tests/unit/docs/test_docs.py
@@ -14,7 +14,7 @@ import os
import shutil
import tempfile
-import mock
+from tests import mock
from tests.unit.docs import BaseDocsTest
from botocore.session import get_session
diff --git a/tests/unit/docs/test_example.py b/tests/unit/docs/test_example.py
index 04e4a111..677fd769 100644
--- a/tests/unit/docs/test_example.py
+++ b/tests/unit/docs/test_example.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests.unit.docs import BaseDocsTest
from botocore.hooks import HierarchicalEmitter
diff --git a/tests/unit/docs/test_params.py b/tests/unit/docs/test_params.py
index 8d49f282..8bb8d429 100644
--- a/tests/unit/docs/test_params.py
+++ b/tests/unit/docs/test_params.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
from tests.unit.docs import BaseDocsTest
from botocore.hooks import HierarchicalEmitter
diff --git a/tests/unit/docs/test_service.py b/tests/unit/docs/test_service.py
index 895362f0..f768626c 100644
--- a/tests/unit/docs/test_service.py
+++ b/tests/unit/docs/test_service.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
import os
-import mock
+from tests import mock
from tests.unit.docs import BaseDocsTest
from botocore.session import get_session
diff --git a/tests/unit/retries/test_adaptive.py b/tests/unit/retries/test_adaptive.py
index 40796a6e..5c495187 100644
--- a/tests/unit/retries/test_adaptive.py
+++ b/tests/unit/retries/test_adaptive.py
@@ -1,6 +1,6 @@
from tests import unittest
-import mock
+from tests import mock
from botocore.retries import adaptive
from botocore.retries import standard
diff --git a/tests/unit/retries/test_special.py b/tests/unit/retries/test_special.py
index 2929c923..f643b902 100644
--- a/tests/unit/retries/test_special.py
+++ b/tests/unit/retries/test_special.py
@@ -1,6 +1,6 @@
from tests import unittest
-import mock
+from tests import mock
from botocore.awsrequest import AWSResponse
from botocore.retries import standard, special
diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py
index 063823ef..f8a14a9f 100644
--- a/tests/unit/retries/test_standard.py
+++ b/tests/unit/retries/test_standard.py
@@ -1,6 +1,6 @@
from tests import unittest
-import mock
+from tests import mock
from botocore.retries import standard
from botocore.retries import quota
diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py
index e1d2a6f1..e2427a02 100644
--- a/tests/unit/test_args.py
+++ b/tests/unit/test_args.py
@@ -15,7 +15,7 @@ import socket
import botocore.config
from tests import unittest
-import mock
+from tests import mock
from botocore import args
from botocore import exceptions
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index 2f6384f9..3e12dddf 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
import botocore.config
from tests import unittest
-import mock
+from tests import mock
import botocore
from botocore import utils
diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py
index 40b7d9e2..21352ac8 100644
--- a/tests/unit/test_compat.py
+++ b/tests/unit/test_compat.py
@@ -11,7 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import datetime
-import mock
+from tests import mock
from botocore.exceptions import MD5UnavailableError
from botocore.compat import (
diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py
index c17a572c..04ff258b 100644
--- a/tests/unit/test_config_provider.py
+++ b/tests/unit/test_config_provider.py
@@ -11,7 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
-import mock
+from tests import mock
import botocore
import botocore.session as session
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index 8e0bddc1..14ce817f 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -14,7 +14,7 @@
# language governing permissions and limitations under the License.
from tests import unittest, BaseEnvVar
import os
-import mock
+from tests import mock
import tempfile
import shutil
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index 06fee6f6..791b7452 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -13,7 +13,7 @@
# language governing permissions and limitations under the License.
from datetime import datetime, timedelta
import subprocess
-import mock
+from tests import mock
import os
import tempfile
import shutil
diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py
index f0fe8a96..7f7aa3a4 100644
--- a/tests/unit/test_handlers.py
+++ b/tests/unit/test_handlers.py
@@ -14,7 +14,7 @@
from tests import unittest, BaseSessionTest
import base64
-import mock
+from tests import mock
import copy
import os
import json
diff --git a/tests/unit/test_history.py b/tests/unit/test_history.py
index d38459e1..d1113956 100644
--- a/tests/unit/test_history.py
+++ b/tests/unit/test_history.py
@@ -1,6 +1,6 @@
from tests import unittest
-import mock
+from tests import mock
from botocore.history import HistoryRecorder
from botocore.history import BaseHistoryHandler
diff --git a/tests/unit/test_idempotency.py b/tests/unit/test_idempotency.py
index 1a11fd90..a679afd9 100644
--- a/tests/unit/test_idempotency.py
+++ b/tests/unit/test_idempotency.py
@@ -13,7 +13,7 @@
from tests import unittest
import re
-import mock
+from tests import mock
from botocore.handlers import generate_idempotent_uuid
diff --git a/tests/unit/test_loaders.py b/tests/unit/test_loaders.py
index ff25066f..7e7906a0 100644
--- a/tests/unit/test_loaders.py
+++ b/tests/unit/test_loaders.py
@@ -22,7 +22,7 @@
import os
import contextlib
import copy
-import mock
+from tests import mock
from botocore.exceptions import DataNotFoundError, UnknownServiceError
from botocore.loaders import JSONFileLoader
diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py
index cc7d300d..18aa17fc 100644
--- a/tests/unit/test_paginate.py
+++ b/tests/unit/test_paginate.py
@@ -20,7 +20,7 @@ from botocore.paginate import TokenEncoder
from botocore.exceptions import PaginationError
from botocore.compat import six
-import mock
+from tests import mock
def encode_token(token):
diff --git a/tests/unit/test_retryhandler.py b/tests/unit/test_retryhandler.py
index eedd7718..9abce0fd 100644
--- a/tests/unit/test_retryhandler.py
+++ b/tests/unit/test_retryhandler.py
@@ -15,7 +15,7 @@
from tests import unittest
-import mock
+from tests import mock
from botocore import retryhandler
from botocore.exceptions import (
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py
index 3d914ac3..b299b2cd 100644
--- a/tests/unit/test_session.py
+++ b/tests/unit/test_session.py
@@ -19,7 +19,7 @@ import logging
import tempfile
import shutil
-import mock
+from tests import mock
import botocore.session
import botocore.exceptions
diff --git a/tests/unit/test_session_legacy.py b/tests/unit/test_session_legacy.py
index 8fa69a18..4016d1b4 100644
--- a/tests/unit/test_session_legacy.py
+++ b/tests/unit/test_session_legacy.py
@@ -19,7 +19,7 @@ import logging
import tempfile
import shutil
-import mock
+from tests import mock
import botocore.session
import botocore.exceptions
diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py
index 0fb36675..16b06f76 100644
--- a/tests/unit/test_signers.py
+++ b/tests/unit/test_signers.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import mock
+from tests import mock
import datetime
import json
diff --git a/tests/unit/test_stub.py b/tests/unit/test_stub.py
index fbc40633..eb276562 100644
--- a/tests/unit/test_stub.py
+++ b/tests/unit/test_stub.py
@@ -12,7 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest
-import mock
+from tests import mock
from botocore.stub import Stubber
from botocore.exceptions import ParamValidationError, StubResponseError, UnStubbedResponseError
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 58360e04..07fad9f8 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -15,7 +15,7 @@ from tests import RawResponse
from dateutil.tz import tzutc, tzoffset
import datetime
import copy
-import mock
+from tests import mock
import botocore
from botocore import xform_name
diff --git a/tests/unit/test_waiters.py b/tests/unit/test_waiters.py
index c82063a1..56e5ab0a 100644
--- a/tests/unit/test_waiters.py
+++ b/tests/unit/test_waiters.py
@@ -13,7 +13,7 @@
import os
from tests import unittest, BaseEnvVar
-import mock
+from tests import mock
import botocore
from botocore.compat import six
--
2.29.2
From 3be64b7fc1dff89faf67b6c3fa5404d05bd68f53 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= <mcepl@cepl.eu>
Date: Mon, 31 Aug 2020 23:57:30 +0200
Subject: [PATCH 03/14] Remove superfluous -v in scripts/ci/run-integ-tests
(removed)
--
2.29.2
From 92966469ffca74f146672faf6a30bb7298a962d2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= <mcepl@cepl.eu>
Date: Tue, 1 Sep 2020 00:02:38 +0200
Subject: [PATCH 04/14] Change requested in the review.
---
tests/functional/test_paginate.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/functional/test_paginate.py b/tests/functional/test_paginate.py
index 974f4839..8bf84ca6 100644
--- a/tests/functional/test_paginate.py
+++ b/tests/functional/test_paginate.py
@@ -233,4 +233,4 @@ class TestTokenEncoding(unittest.TestCase):
encoded = TokenEncoder().encode(token_dict)
assert isinstance(encoded, six.string_types)
decoded = TokenDecoder().decode(encoded)
- self.assertEqual(decoded, token_dict)
+ assert decoded == token_dict
--
2.29.2
From c9e4d765f385c2e6ef46512021fac913bdd2fdef Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C4=9Bj=20Cepl?= <mcepl@cepl.eu>
Date: Tue, 1 Sep 2020 00:12:31 +0200
Subject: [PATCH 05/14] Cosmetic cleanup.
Mainly import mock from the test module, not directly.
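A short sketch of the resulting usage, assuming tests/__init__.py already exposes mock as in the previous patch; test modules then go through that single import instead of importing the backport themselves:

    from tests import mock

    stub = mock.Mock()                               # instead of `from mock import Mock`
    with mock.patch('urllib3.util.wait_for_read'):   # instead of `from mock import patch`
        pass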
---
tests/functional/test_apigateway.py | 5 +--
tests/functional/test_cloudsearchdomain.py | 2 -
tests/functional/test_credentials.py | 1 -
tests/functional/test_docdb.py | 6 ---
tests/functional/test_history.py | 4 --
tests/functional/test_iot_data.py | 3 +-
tests/functional/test_machinelearning.py | 2 -
tests/functional/test_neptune.py | 6 ---
tests/functional/test_public_apis.py | 1 -
tests/functional/test_rds.py | 3 --
tests/functional/test_s3.py | 5 ++-
tests/functional/test_s3_control.py | 2 +-
tests/functional/test_six_threading.py | 6 +--
tests/unit/test_awsrequest.py | 33 ++++++++--------
tests/unit/test_discovery.py | 25 ++++++------
tests/unit/test_endpoint.py | 44 ++++++++++------------
tests/unit/test_eventstream.py | 29 +++++++-------
tests/unit/test_http_session.py | 26 ++++++-------
tests/unit/test_s3_addressing.py | 4 --
19 files changed, 79 insertions(+), 128 deletions(-)
diff --git a/tests/functional/test_apigateway.py b/tests/functional/test_apigateway.py
index 547ce1e3..b2b78acb 100644
--- a/tests/functional/test_apigateway.py
+++ b/tests/functional/test_apigateway.py
@@ -10,8 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-
from tests import BaseSessionTest, ClientHTTPStubber
@@ -36,4 +34,5 @@ class TestApiGateway(BaseSessionTest):
self.client.get_export(**params)
request = self.http_stubber.requests[0]
self.assertEqual(request.method, 'GET')
- self.assertEqual(request.headers.get('Accept'), b'application/yaml')
+ self.assertEqual(request.headers.get('Accept'),
+ b'application/yaml')
diff --git a/tests/functional/test_cloudsearchdomain.py b/tests/functional/test_cloudsearchdomain.py
index 1933644f..a39919b5 100644
--- a/tests/functional/test_cloudsearchdomain.py
+++ b/tests/functional/test_cloudsearchdomain.py
@@ -10,8 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py
index 5d032baf..bee2b540 100644
--- a/tests/functional/test_credentials.py
+++ b/tests/functional/test_credentials.py
@@ -35,7 +35,6 @@ from botocore.credentials import CanonicalNameCredentialSourcer
from botocore.credentials import DeferredRefreshableCredentials
from botocore.credentials import create_credential_resolver
from botocore.credentials import JSONFileCache
-from botocore.credentials import SSOProvider
from botocore.config import Config
from botocore.session import Session
from botocore.exceptions import InvalidConfigError, InfiniteLoopConfigError
diff --git a/tests/functional/test_docdb.py b/tests/functional/test_docdb.py
index 0c35d408..1408b0a7 100644
--- a/tests/functional/test_docdb.py
+++ b/tests/functional/test_docdb.py
@@ -10,13 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-from contextlib import contextmanager
-
-import botocore.session
from tests import BaseSessionTest, ClientHTTPStubber
-from botocore.stub import Stubber
-from tests import unittest
class TestDocDBPresignUrlInjection(BaseSessionTest):
diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py
index ae0f1654..e645ac73 100644
--- a/tests/functional/test_history.py
+++ b/tests/functional/test_history.py
@@ -1,7 +1,3 @@
-from contextlib import contextmanager
-
-from tests import mock
-
from tests import BaseSessionTest, ClientHTTPStubber
from botocore.history import BaseHistoryHandler
from botocore.history import get_global_history_recorder
diff --git a/tests/functional/test_iot_data.py b/tests/functional/test_iot_data.py
index e9eeb8a0..cb3c2bc9 100644
--- a/tests/functional/test_iot_data.py
+++ b/tests/functional/test_iot_data.py
@@ -10,8 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-import sys
-from tests import unittest, mock, BaseSessionTest
+from tests import mock, BaseSessionTest
from botocore.exceptions import UnsupportedTLSVersionWarning
diff --git a/tests/functional/test_machinelearning.py b/tests/functional/test_machinelearning.py
index 51fc3fae..d96fb4af 100644
--- a/tests/functional/test_machinelearning.py
+++ b/tests/functional/test_machinelearning.py
@@ -10,8 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-
from tests import BaseSessionTest, ClientHTTPStubber
diff --git a/tests/functional/test_neptune.py b/tests/functional/test_neptune.py
index aafbdd4b..5bd5fdbc 100644
--- a/tests/functional/test_neptune.py
+++ b/tests/functional/test_neptune.py
@@ -10,13 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-from contextlib import contextmanager
-
-import botocore.session
from tests import BaseSessionTest, ClientHTTPStubber
-from botocore.stub import Stubber
-from tests import unittest
class TestNeptunePresignUrlInjection(BaseSessionTest):
diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py
index b9ad5948..29de2f16 100644
--- a/tests/functional/test_public_apis.py
+++ b/tests/functional/test_public_apis.py
@@ -16,7 +16,6 @@ from tests import mock
from tests import ClientHTTPStubber
from botocore.session import Session
-from botocore.exceptions import NoCredentialsError
from botocore import xform_name
diff --git a/tests/functional/test_rds.py b/tests/functional/test_rds.py
index 8bf85d5e..71ba41ce 100644
--- a/tests/functional/test_rds.py
+++ b/tests/functional/test_rds.py
@@ -10,9 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import mock
-from contextlib import contextmanager
-
import botocore.session
from tests import BaseSessionTest, ClientHTTPStubber
from botocore.stub import Stubber
diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py
index 5354a52b..34b256d4 100644
--- a/tests/functional/test_s3.py
+++ b/tests/functional/test_s3.py
@@ -13,11 +13,12 @@
import re
from tests import temporary_file
-from tests import unittest, mock, BaseSessionTest, create_session, ClientHTTPStubber
+from tests import (unittest, mock, BaseSessionTest, create_session,
+ ClientHTTPStubber)
import botocore.session
from botocore.config import Config
-from botocore.compat import datetime, urlsplit, parse_qs
+from botocore.compat import urlsplit, parse_qs
from botocore.exceptions import ParamValidationError, ClientError
from botocore.exceptions import InvalidS3UsEast1RegionalEndpointConfigError
from botocore.parsers import ResponseParserError
diff --git a/tests/functional/test_s3_control.py b/tests/functional/test_s3_control.py
index e45a0577..afb7e171 100644
--- a/tests/functional/test_s3_control.py
+++ b/tests/functional/test_s3_control.py
@@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
-from tests import unittest, mock, BaseSessionTest, create_session
+from tests import mock, BaseSessionTest
from botocore.config import Config
from botocore.awsrequest import AWSResponse
diff --git a/tests/functional/test_six_threading.py b/tests/functional/test_six_threading.py
index 18f87ba5..d71796c6 100644
--- a/tests/functional/test_six_threading.py
+++ b/tests/functional/test_six_threading.py
@@ -1,7 +1,7 @@
"""
Regression test for six issue #98 (https://github.com/benjaminp/six/issues/98)
"""
-from mock import patch
+from tests import mock
import sys
import threading
import time
@@ -48,8 +48,8 @@ class _ExampleThread(threading.Thread):
def test_six_thread_safety():
_reload_six()
- with patch('botocore.vendored.six.moves.__class__.__setattr__',
- wraps=_wrapped_setattr):
+ with mock.patch('botocore.vendored.six.moves.__class__.__setattr__',
+ wraps=_wrapped_setattr):
threads = []
for i in range(2):
t = _ExampleThread()
diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py
index e884af7c..69642e99 100644
--- a/tests/unit/test_awsrequest.py
+++ b/tests/unit/test_awsrequest.py
@@ -18,17 +18,14 @@ import tempfile
import shutil
import io
import socket
-import sys
-try:
- from mock import Mock, patch
-except ImportError:
- from unittest.mock import Mock, patch
+from tests import mock
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from botocore.exceptions import UnseekableStreamError
-from botocore.awsrequest import AWSRequest, AWSPreparedRequest, AWSResponse
-from botocore.awsrequest import AWSHTTPConnection, AWSHTTPSConnection, HeadersDict
+from botocore.awsrequest import AWSRequest, AWSResponse
+from botocore.awsrequest import (AWSHTTPConnection, AWSHTTPSConnection,
+ HeadersDict)
from botocore.awsrequest import prepare_request_dict, create_request_object
from botocore.compat import file_type, six
@@ -256,7 +253,7 @@ class TestAWSRequest(unittest.TestCase):
class TestAWSResponse(unittest.TestCase):
def setUp(self):
self.response = AWSResponse('http://url.com', 200, HeadersDict(), None)
- self.response.raw = Mock()
+ self.response.raw = mock.Mock()
def set_raw_stream(self, blobs):
def stream(*args, **kwargs):
@@ -291,8 +288,8 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn._tunnel_headers = {'key': 'value'}
# Create a mock response.
- self.mock_response = Mock()
- self.mock_response.fp = Mock()
+ self.mock_response = mock.Mock()
+ self.mock_response.fp = mock.Mock()
# Imitate readline function by creating a list to be sent as
# a side effect of the mocked readline to be able to track how the
@@ -315,12 +312,12 @@ class TestAWSHTTPConnection(unittest.TestCase):
response_components[0], int(response_components[1]),
response_components[2]
)
- conn.response_class = Mock()
+ conn.response_class = mock.Mock()
conn.response_class.return_value = self.mock_response
return conn
def test_expect_100_continue_returned(self):
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# Shows the server first sending a 100 continue response
# then a 200 ok response.
s = FakeSocket(b'HTTP/1.1 100 Continue\r\n\r\nHTTP/1.1 200 OK\r\n')
@@ -336,7 +333,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
self.assertEqual(response.status, 200)
def test_handles_expect_100_with_different_reason_phrase(self):
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# Shows the server first sending a 100 continue response
# then a 200 ok response.
s = FakeSocket(b'HTTP/1.1 100 (Continue)\r\n\r\nHTTP/1.1 200 OK\r\n')
@@ -358,7 +355,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
# When using squid as an HTTP proxy, it will also send
# a Connection: keep-alive header back with the 100 continue
# response. We need to ensure we handle this case.
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# Shows the server first sending a 100 continue response
# then a 500 response. We're picking 500 to confirm we
# actually parse the response instead of getting the
@@ -381,7 +378,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
def test_expect_100_continue_sends_307(self):
# This is the case where we send a 100 continue and the server
# immediately sends a 307
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# Shows the server first sending a 100 continue response
# then a 200 ok response.
s = FakeSocket(
@@ -399,7 +396,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
self.assertEqual(response.status, 307)
def test_expect_100_continue_no_response_from_server(self):
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# Shows the server first sending a 100 continue response
# then a 200 ok response.
s = FakeSocket(
@@ -480,7 +477,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.sock = s
# Test that the standard library method was used by patching out
# the ``_tunnel`` method and seeing if the std lib method was called.
- with patch('urllib3.connection.HTTPConnection._tunnel') as mock_tunnel:
+ with mock.patch('urllib3.connection.HTTPConnection._tunnel') as mock_tunnel:
conn._tunnel()
self.assertTrue(mock_tunnel.called)
@@ -498,7 +495,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
def test_state_reset_on_connection_close(self):
# This simulates what urllib3 does with connections
# in its connection pool logic.
- with patch('urllib3.util.wait_for_read') as wait_mock:
+ with mock.patch('urllib3.util.wait_for_read') as wait_mock:
# First fast fail with a 500 response when we first
# send the expect header.
diff --git a/tests/unit/test_discovery.py b/tests/unit/test_discovery.py
index d6196197..719b99be 100644
--- a/tests/unit/test_discovery.py
+++ b/tests/unit/test_discovery.py
@@ -1,8 +1,5 @@
import time
-try:
- from mock import Mock, call
-except ImportError:
- from unittest.mock import Mock, call
+from tests import mock
from tests import unittest
from botocore.awsrequest import AWSRequest
@@ -151,9 +148,9 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
def construct_manager(self, cache=None, time=None, side_effect=None):
self.service_model = ServiceModel(self.service_description)
- self.meta = Mock(spec=ClientMeta)
+ self.meta = mock.Mock(spec=ClientMeta)
self.meta.service_model = self.service_model
- self.client = Mock()
+ self.client = mock.Mock()
if side_effect is None:
side_effect = [{
'Endpoints': [{
@@ -314,7 +311,7 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
# This second call should be blocked as we just failed
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertIsNone(endpoint)
- self.client.describe_endpoints.call_args_list == [call()]
+ self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_optional_fails_stale_cache(self):
key = ()
@@ -329,7 +326,7 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
# This second call shouldn't go through as we just failed
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertEqual(endpoint, 'old.com')
- self.client.describe_endpoints.call_args_list == [call()]
+ self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_required_fails_no_cache(self):
side_effect = [ConnectionError(error=None)] * 2
@@ -356,7 +353,7 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
# We have a stale endpoint, so this shouldn't fail or force a refresh
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertEqual(endpoint, 'old.com')
- self.client.describe_endpoints.call_args_list == [call()]
+ self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_required_force_refresh_success(self):
side_effect = [
@@ -371,13 +368,13 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
# First call will fail
with self.assertRaises(EndpointDiscoveryRefreshFailed):
self.manager.describe_endpoint(**kwargs)
- self.client.describe_endpoints.call_args_list == [call()]
+ self.client.describe_endpoints.call_args_list == [mock.call()]
# Force a refresh if the cache is empty but discovery is required
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertEqual(endpoint, 'new.com')
def test_describe_endpoint_retries_after_failing(self):
- fake_time = Mock()
+ fake_time = mock.Mock()
fake_time.side_effect = [0, 100, 200]
side_effect = [
ConnectionError(error=None),
@@ -390,7 +387,7 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
kwargs = {'Operation': 'TestDiscoveryOptional'}
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertIsNone(endpoint)
- self.client.describe_endpoints.call_args_list == [call()]
+ self.client.describe_endpoints.call_args_list == [mock.call()]
# Second time should try again as enough time has elapsed
endpoint = self.manager.describe_endpoint(**kwargs)
self.assertEqual(endpoint, 'new.com')
@@ -399,12 +396,12 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
def setUp(self):
super(TestEndpointDiscoveryHandler, self).setUp()
- self.manager = Mock(spec=EndpointDiscoveryManager)
+ self.manager = mock.Mock(spec=EndpointDiscoveryManager)
self.handler = EndpointDiscoveryHandler(self.manager)
self.service_model = ServiceModel(self.service_description)
def test_register_handler(self):
- events = Mock(spec=HierarchicalEmitter)
+ events = mock.Mock(spec=HierarchicalEmitter)
self.handler.register(events, 'foo-bar')
events.register.assert_any_call(
'before-parameter-build.foo-bar', self.handler.gather_identifiers
diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py
index 2b6d896e..b9a97df5 100644
--- a/tests/unit/test_endpoint.py
+++ b/tests/unit/test_endpoint.py
@@ -13,17 +13,11 @@
import socket
from tests import unittest
-try:
- from mock import Mock, patch, sentinel
-except ImportError:
- from unittest.mock import Mock, patch, sentinel
+from tests import mock
from botocore.compat import six
-from botocore.awsrequest import AWSRequest
from botocore.endpoint import Endpoint, DEFAULT_TIMEOUT
from botocore.endpoint import EndpointCreator
-from botocore.exceptions import EndpointConnectionError
-from botocore.exceptions import ConnectionClosedError
from botocore.exceptions import HTTPClientError
from botocore.httpsession import URLLib3Session
from botocore.model import OperationModel, ServiceId
@@ -57,21 +51,21 @@ class RecordStreamResets(six.StringIO):
class TestEndpointBase(unittest.TestCase):
def setUp(self):
- self.op = Mock()
+ self.op = mock.Mock()
self.op.has_streaming_output = False
self.op.has_event_stream_output = False
self.op.metadata = {'protocol': 'json'}
- self.event_emitter = Mock()
+ self.event_emitter = mock.Mock()
self.event_emitter.emit.return_value = []
- self.factory_patch = patch(
+ self.factory_patch = mock.patch(
'botocore.parsers.ResponseParserFactory')
self.factory = self.factory_patch.start()
self.endpoint = Endpoint(
'https://ec2.us-west-2.amazonaws.com/',
endpoint_prefix='ec2',
event_emitter=self.event_emitter)
- self.http_session = Mock()
- self.http_session.send.return_value = Mock(
+ self.http_session = mock.Mock()
+ self.http_session.send.return_value = mock.Mock(
status_code=200, headers={}, content=b'{"Foo": "bar"}',
)
self.endpoint.http_session = self.http_session
@@ -123,24 +117,24 @@ class TestEndpointFeatures(TestEndpointBase):
def test_make_request_with_context(self):
r = request_dict()
r['context'] = {'signing': {'region': 'us-west-2'}}
- with patch('botocore.endpoint.Endpoint.prepare_request') as prepare:
+ with mock.patch('botocore.endpoint.Endpoint.prepare_request') as prepare:
self.endpoint.make_request(self.op, r)
request = prepare.call_args[0][0]
self.assertEqual(request.context['signing']['region'], 'us-west-2')
def test_parses_modeled_exception_fields(self):
# Setup the service model to have exceptions to generate the mapping
- self.service_model = Mock(spec=ServiceModel)
+ self.service_model = mock.Mock(spec=ServiceModel)
self.op.service_model = self.service_model
- self.exception_shape = Mock(spec=StructureShape)
+ self.exception_shape = mock.Mock(spec=StructureShape)
shape_for_error_code = self.service_model.shape_for_error_code
shape_for_error_code.return_value = self.exception_shape
r = request_dict()
- self.http_session.send.return_value = Mock(
+ self.http_session.send.return_value = mock.Mock(
status_code=400, headers={}, content=b'',
)
- parser = Mock()
+ parser = mock.Mock()
parser.parse.side_effect = [
{
'Error': {
@@ -172,7 +166,7 @@ class TestRetryInterface(TestEndpointBase):
def setUp(self):
super(TestRetryInterface, self).setUp()
self.retried_on_exception = None
- self._operation = Mock(spec=OperationModel)
+ self._operation = mock.Mock(spec=OperationModel)
self._operation.name = 'DescribeInstances'
self._operation.metadata = {'protocol': 'query'}
self._operation.service_model.service_id = ServiceId('EC2')
@@ -222,7 +216,7 @@ class TestRetryInterface(TestEndpointBase):
def test_retry_attempts_added_to_response_metadata(self):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
num_retries=1)
- parser = Mock()
+ parser = mock.Mock()
parser.parse.return_value = {'ResponseMetadata': {}}
self.factory.return_value.create_parser.return_value = parser
response = self.endpoint.make_request(self._operation, request_dict())
@@ -231,7 +225,7 @@ class TestRetryInterface(TestEndpointBase):
def test_retry_attempts_is_zero_when_not_retried(self):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
num_retries=0)
- parser = Mock()
+ parser = mock.Mock()
parser.parse.return_value = {'ResponseMetadata': {}}
self.factory.return_value.create_parser.return_value = parser
response = self.endpoint.make_request(self._operation, request_dict())
@@ -253,7 +247,7 @@ class TestS3ResetStreamOnRetry(TestEndpointBase):
return 0
def test_reset_stream_on_retry(self):
- op = Mock()
+ op = mock.Mock()
body = RecordStreamResets('foobar')
op.name = 'PutObject'
op.has_streaming_output = True
@@ -281,14 +275,14 @@ class TestEventStreamBody(TestEndpointBase):
class TestEndpointCreator(unittest.TestCase):
def setUp(self):
- self.service_model = Mock(
+ self.service_model = mock.Mock(
endpoint_prefix='ec2', signature_version='v2',
signing_name='ec2')
self.environ = {}
- self.environ_patch = patch('os.environ', self.environ)
+ self.environ_patch = mock.patch('os.environ', self.environ)
self.environ_patch.start()
- self.creator = EndpointCreator(Mock())
- self.mock_session = Mock(spec=URLLib3Session)
+ self.creator = EndpointCreator(mock.Mock())
+ self.mock_session = mock.Mock(spec=URLLib3Session)
def tearDown(self):
self.environ_patch.stop()
diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py
index 07a77cf8..45336bdc 100644
--- a/tests/unit/test_eventstream.py
+++ b/tests/unit/test_eventstream.py
@@ -12,10 +12,7 @@
# language governing permissions and limitations under the License.
"""Unit tests for the binary event stream decoder. """
-try:
- from mock import Mock
-except ImportError:
- from unittest.mock import Mock
+from tests import mock
from botocore.parsers import EventStreamXMLParser
from botocore.eventstream import (
@@ -429,7 +426,7 @@ def test_unpack_prelude():
def create_mock_raw_stream(*data):
- raw_stream = Mock()
+ raw_stream = mock.Mock()
def generator():
for chunk in data:
@@ -443,8 +440,8 @@ def test_event_stream_wrapper_iteration():
b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00",
b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85",
)
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
+ parser = mock.Mock(spec=EventStreamXMLParser)
+ output_shape = mock.Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
events = list(event_stream)
assert len(events) == 1
@@ -460,9 +457,9 @@ def test_event_stream_wrapper_iteration():
def test_eventstream_wrapper_iteration_error():
try:
raw_stream = create_mock_raw_stream(ERROR_EVENT_MESSAGE[0])
- parser = Mock(spec=EventStreamXMLParser)
+ parser = mock.Mock(spec=EventStreamXMLParser)
parser.parse.return_value = {}
- output_shape = Mock()
+ output_shape = mock.Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
list(event_stream)
except EventStreamError:
@@ -473,7 +470,7 @@ def test_eventstream_wrapper_iteration_error():
def test_event_stream_wrapper_close():
- raw_stream = Mock()
+ raw_stream = mock.Mock()
event_stream = EventStream(raw_stream, None, None, '')
event_stream.close()
raw_stream.close.assert_called_once_with()
@@ -485,8 +482,8 @@ def test_event_stream_initial_response():
b'\x05event\x0b:event-type\x07\x00\x10initial-response\r:content-type',
b'\x07\x00\ttext/json{"InitialResponse": "sometext"}\xf6\x98$\x83'
)
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
+ parser = mock.Mock(spec=EventStreamXMLParser)
+ output_shape = mock.Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
event = event_stream.get_initial_response()
headers = {
@@ -505,8 +502,8 @@ def test_event_stream_initial_response_wrong_type():
b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00",
b"\x00\xa0\x0c{'foo':'bar'}\xd3\x89\x02\x85",
)
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
+ parser = mock.Mock(spec=EventStreamXMLParser)
+ output_shape = mock.Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
event_stream.get_initial_response()
except NoInitialResponseError:
@@ -519,8 +516,8 @@ def test_event_stream_initial_response_wrong_type():
def test_event_stream_initial_response_no_event():
try:
raw_stream = create_mock_raw_stream(b'')
- parser = Mock(spec=EventStreamXMLParser)
- output_shape = Mock()
+ parser = mock.Mock(spec=EventStreamXMLParser)
+ output_shape = mock.Mock()
event_stream = EventStream(raw_stream, output_shape, parser, '')
event_stream.get_initial_response()
except NoInitialResponseError:
diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py
index da84c94b..c11eb701 100644
--- a/tests/unit/test_http_session.py
+++ b/tests/unit/test_http_session.py
@@ -1,13 +1,9 @@
import socket
-try:
- from mock import patch, Mock, ANY
-except ImportError:
- from unittest.mock import patch, Mock, ANY
+from tests import mock
from tests import unittest
from urllib3.exceptions import NewConnectionError, ProtocolError
-from botocore.vendored import six
from botocore.awsrequest import AWSRequest
from botocore.awsrequest import AWSHTTPConnectionPool, AWSHTTPSConnectionPool
from botocore.httpsession import get_cert_path
@@ -59,7 +55,7 @@ class TestHttpSessionUtils(unittest.TestCase):
self.assertEqual(path, cert_path)
def test_get_cert_path_certifi_or_default(self):
- with patch('botocore.httpsession.where') as where:
+ with mock.patch('botocore.httpsession.where') as where:
path = '/bundle/path'
where.return_value = path
cert_path = get_cert_path(True)
@@ -75,17 +71,17 @@ class TestURLLib3Session(unittest.TestCase):
data=b'',
)
- self.response = Mock()
+ self.response = mock.Mock()
self.response.headers = {}
self.response.stream.return_value = b''
- self.pool_manager = Mock()
- self.connection = Mock()
+ self.pool_manager = mock.Mock()
+ self.connection = mock.Mock()
self.connection.urlopen.return_value = self.response
self.pool_manager.connection_from_url.return_value = self.connection
- self.pool_patch = patch('botocore.httpsession.PoolManager')
- self.proxy_patch = patch('botocore.httpsession.proxy_from_url')
+ self.pool_patch = mock.patch('botocore.httpsession.PoolManager')
+ self.proxy_patch = mock.patch('botocore.httpsession.proxy_from_url')
self.pool_manager_cls = self.pool_patch.start()
self.proxy_manager_fun = self.proxy_patch.start()
self.pool_manager_cls.return_value = self.pool_manager
@@ -104,7 +100,7 @@ class TestURLLib3Session(unittest.TestCase):
url=url,
body=body,
headers=headers,
- retries=ANY,
+ retries=mock.ANY,
assert_same_host=False,
preload_content=False,
decode_content=False,
@@ -114,9 +110,9 @@ class TestURLLib3Session(unittest.TestCase):
def _assert_manager_call(self, manager, *assert_args, **assert_kwargs):
call_kwargs = {
'strict': True,
- 'maxsize': ANY,
- 'timeout': ANY,
- 'ssl_context': ANY,
+ 'maxsize': mock.ANY,
+ 'timeout': mock.ANY,
+ 'ssl_context': mock.ANY,
'socket_options': [],
'cert_file': None,
'key_file': None,
diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py
index ee2768fd..4dbff04b 100644
--- a/tests/unit/test_s3_addressing.py
+++ b/tests/unit/test_s3_addressing.py
@@ -16,10 +16,6 @@
import os
from tests import BaseSessionTest, ClientHTTPStubber
-try:
- from mock import patch, Mock
-except ImportError:
- from unittest.mock import patch, Mock
from botocore.compat import OrderedDict
from botocore.compat import six
--
2.29.2
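
The hunks in the patch above replace each test module's old import shim (try importing the third-party mock package, fall back to unittest.mock) with a single "from tests import mock". The sketch below shows, as an assumption, what such a re-export helper in the test package's __init__.py could look like; the helper actually shipped by the patch series is not reproduced here, so treat the module path and comment as hypothetical.

# Hypothetical sketch of a tests/__init__.py re-export shim (not copied from
# the patch): expose the standard-library mock module under one name so that
# every test module can simply do `from tests import mock`.
from unittest import mock  # noqa: F401  re-exported for test modules

# Test modules then use the namespaced helpers directly, for example:
#     from tests import mock
#     fake = mock.Mock(spec=SomeClass)
#     with mock.patch('botocore.httpsession.where') as where:
#         ...

Keeping a single import point means the tests no longer depend on the standalone mock distribution on Python 3, which is the point of this part of the series.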
From 7a90a5c4c0915e94e770852577ab27375949b023 Mon Sep 17 00:00:00 2001
From: Zidaan Dutta <ziddutta@amazon.com>
Date: Mon, 19 Oct 2020 19:17:58 -0400
Subject: [PATCH 06/14] pytest migration of botocore functional tests
---
tests/functional/csm/test_monitoring.py | 12 +-
tests/functional/docs/__init__.py | 12 +-
tests/functional/docs/test_alias.py | 12 +-
tests/functional/docs/test_autoscaling.py | 2 +-
tests/functional/docs/test_ec2.py | 2 +-
tests/functional/docs/test_s3.py | 3 +-
tests/functional/leak/test_resource_leaks.py | 12 +-
tests/functional/retries/test_bucket.py | 8 +-
tests/functional/retries/test_quota.py | 4 +-
tests/functional/test_apigateway.py | 5 +-
tests/functional/test_client.py | 6 +-
tests/functional/test_client_class_names.py | 3 +-
tests/functional/test_client_metadata.py | 12 +-
tests/functional/test_cloudsearchdomain.py | 6 +-
tests/functional/test_cognito_idp.py | 156 ++++++-----
tests/functional/test_credentials.py | 61 ++---
tests/functional/test_discovery.py | 30 +--
tests/functional/test_docdb.py | 4 +-
tests/functional/test_dynamodb.py | 8 +-
tests/functional/test_ec2.py | 36 +--
tests/functional/test_event_alias.py | 18 +-
tests/functional/test_events.py | 45 ++--
tests/functional/test_history.py | 43 ++-
tests/functional/test_iot_data.py | 6 +-
tests/functional/test_kinesis.py | 20 +-
tests/functional/test_lex.py | 4 +-
tests/functional/test_loaders.py | 2 +-
tests/functional/test_machinelearning.py | 2 +-
tests/functional/test_model_completeness.py | 17 +-
tests/functional/test_modeled_exceptions.py | 17 +-
tests/functional/test_neptune.py | 5 +-
tests/functional/test_paginate.py | 25 +-
tests/functional/test_paginator_config.py | 14 +-
tests/functional/test_public_apis.py | 16 +-
tests/functional/test_rds.py | 10 +-
tests/functional/test_regions.py | 102 +++-----
tests/functional/test_response_shadowing.py | 25 +-
tests/functional/test_retry.py | 12 +-
tests/functional/test_route53.py | 8 +-
tests/functional/test_s3.py | 262 +++++++------------
tests/functional/test_s3_control.py | 14 +-
tests/functional/test_sagemaker.py | 8 +-
tests/functional/test_service_alias.py | 7 +-
tests/functional/test_service_names.py | 14 +-
tests/functional/test_session.py | 20 +-
tests/functional/test_six_imports.py | 21 +-
tests/functional/test_sts.py | 7 +-
tests/functional/test_stub.py | 46 ++--
tests/functional/test_utils.py | 8 +-
tests/functional/test_waiter_config.py | 35 +--
50 files changed, 557 insertions(+), 670 deletions(-)
diff --git a/tests/functional/csm/test_monitoring.py b/tests/functional/csm/test_monitoring.py
index 53ab1369..93ee0397 100644
--- a/tests/functional/csm/test_monitoring.py
+++ b/tests/functional/csm/test_monitoring.py
@@ -17,6 +17,7 @@ import logging
import os
import socket
import threading
+import pytest
from tests import mock
@@ -45,13 +46,6 @@ class NonRetryableException(Exception):
EXPECTED_EXCEPTIONS_THROWN = (
botocore.exceptions.ClientError, NonRetryableException, RetryableException)
-
-def test_client_monitoring():
- test_cases = _load_test_cases()
- for case in test_cases:
- _run_test_case(case)
-
-
def _load_test_cases():
with open(CASES_FILE) as f:
loaded_tests = json.loads(f.read())
@@ -59,7 +53,6 @@ def _load_test_cases():
_replace_expected_anys(test_cases)
return test_cases
-
def _get_cases_with_defaults(loaded_tests):
cases = []
defaults = loaded_tests['defaults']
@@ -174,6 +167,9 @@ def _add_stubbed_response(stubber, attempt_response):
content = b'{}'
stubber.add_response(status=status_code, headers=headers, body=content)
+@pytest.mark.parametrize("test_case", _load_test_cases())
+def test_client_monitoring(test_case):
+ _run_test_case(test_case)
class MonitoringListener(threading.Thread):
_PACKET_SIZE = 1024 * 8
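
The csm/test_monitoring.py hunk above swaps a loop that ran _run_test_case for every loaded case inside one test function for a pytest.mark.parametrize decorator, so each case is collected and reported as its own test. A minimal, self-contained sketch of that conversion follows; the helper names (_load_cases, _run_case) are made up for illustration and are not the names used in the patch.

import pytest

def _load_cases():
    # Stand-in for reading the JSON case file; here it is a stub.
    return [{"name": "case-a"}, {"name": "case-b"}]

def _run_case(case):
    # Stand-in for the real per-case assertions.
    assert "name" in case

# Old style: one test that loops, giving a single pass/fail for all cases.
# def test_all_cases():
#     for case in _load_cases():
#         _run_case(case)

# New style: parametrize, so pytest reports each case separately and keeps
# running the remaining cases after one fails.
@pytest.mark.parametrize("case", _load_cases())
def test_case(case):
    _run_case(case)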
diff --git a/tests/functional/docs/__init__.py b/tests/functional/docs/__init__.py
index fc756842..0573bc70 100644
--- a/tests/functional/docs/__init__.py
+++ b/tests/functional/docs/__init__.py
@@ -21,23 +21,23 @@ class BaseDocsFunctionalTest(unittest.TestCase):
def assert_contains_line(self, line, contents):
contents = contents.decode('utf-8')
- self.assertIn(line, contents)
+ assert line in contents
def assert_contains_lines_in_order(self, lines, contents):
contents = contents.decode('utf-8')
for line in lines:
- self.assertIn(line, contents)
+ assert line in contents
beginning = contents.find(line)
contents = contents[(beginning + len(line)):]
def assert_not_contains_line(self, line, contents):
contents = contents.decode('utf-8')
- self.assertNotIn(line, contents)
+ assert line not in contents
def assert_not_contains_lines(self, lines, contents):
contents = contents.decode('utf-8')
for line in lines:
- self.assertNotIn(line, contents)
+ assert line not in contents
def get_title_section_for(self, service_name):
contents = ServiceDocumenter(
@@ -52,7 +52,7 @@ class BaseDocsFunctionalTest(unittest.TestCase):
contents = contents.decode('utf-8')
start_method_document = ' .. py:method:: %s(' % operation_name
start_index = contents.find(start_method_document)
- self.assertNotEqual(start_index, -1, 'Method is not found in contents')
+ assert start_index != -1, 'Method is not found in contents'
contents = contents[start_index:]
end_index = contents.find(
' .. py:method::', len(start_method_document))
@@ -63,7 +63,7 @@ class BaseDocsFunctionalTest(unittest.TestCase):
contents = contents.decode('utf-8')
start_param_document = ' :type %s:' % param_name
start_index = contents.find(start_param_document)
- self.assertNotEqual(start_index, -1, 'Param is not found in contents')
+ assert start_index != -1, 'Param is not found in contents'
contents = contents[start_index:]
end_index = contents.find(' :type', len(start_param_document))
contents = contents[:end_index]
diff --git a/tests/functional/docs/test_alias.py b/tests/functional/docs/test_alias.py
index 38201134..bb3a004f 100644
--- a/tests/functional/docs/test_alias.py
+++ b/tests/functional/docs/test_alias.py
@@ -27,10 +27,10 @@ class TestAliasesDocumented(BaseDocsFunctionalTest):
# Make sure the new parameters are in the documentation
# but the old names are not.
- self.assertIn(param_name_template % new_name, content)
- self.assertIn(param_type_template % new_name, content)
- self.assertIn(param_example_template % new_name, content)
+ assert param_name_template % new_name in content
+ assert param_type_template % new_name in content
+ assert param_example_template % new_name in content
- self.assertNotIn(param_name_template % original_name, content)
- self.assertNotIn(param_type_template % original_name, content)
- self.assertNotIn(param_example_template % original_name, content)
+ assert param_name_template % original_name not in content
+ assert param_type_template % original_name not in content
+ assert param_example_template % original_name not in content
\ No newline at end of file
diff --git a/tests/functional/docs/test_autoscaling.py b/tests/functional/docs/test_autoscaling.py
index 03c24557..cf60c8a6 100644
--- a/tests/functional/docs/test_autoscaling.py
+++ b/tests/functional/docs/test_autoscaling.py
@@ -17,4 +17,4 @@ class TestAutoscalingDocs(BaseDocsFunctionalTest):
def test_documents_encoding_of_user_data(self):
docs = self.get_parameter_documentation_from_service(
'autoscaling', 'create_launch_configuration', 'UserData')
- self.assertIn('base64 encoded automatically', docs.decode('utf-8'))
+ assert 'base64 encoded automatically' in docs.decode('utf-8')
diff --git a/tests/functional/docs/test_ec2.py b/tests/functional/docs/test_ec2.py
index 97424361..6e068780 100644
--- a/tests/functional/docs/test_ec2.py
+++ b/tests/functional/docs/test_ec2.py
@@ -17,7 +17,7 @@ class TestEc2Docs(BaseDocsFunctionalTest):
def test_documents_encoding_of_user_data(self):
docs = self.get_parameter_documentation_from_service(
'ec2', 'run_instances', 'UserData')
- self.assertIn('base64 encoded automatically', docs.decode('utf-8'))
+ assert 'base64 encoded automatically' in docs.decode('utf-8')
def test_copy_snapshot_presigned_url_is_autopopulated(self):
self.assert_is_documented_as_autopopulated_param(
diff --git a/tests/functional/docs/test_s3.py b/tests/functional/docs/test_s3.py
index a1d463c5..b3e7595e 100644
--- a/tests/functional/docs/test_s3.py
+++ b/tests/functional/docs/test_s3.py
@@ -40,8 +40,7 @@ class TestS3Docs(BaseDocsFunctionalTest):
for method_name in modified_methods:
method_contents = self.get_method_document_block(
method_name, service_contents)
- self.assertNotIn('ContentMD5=\'string\'',
- method_contents.decode('utf-8'))
+ assert 'ContentMD5=\'string\'' not in method_contents.decode('utf-8')
def test_copy_source_documented_as_union_type(self):
content = self.get_docstring_for_method('s3', 'copy_object')
diff --git a/tests/functional/leak/test_resource_leaks.py b/tests/functional/leak/test_resource_leaks.py
index df1c4fe7..a4adaf1e 100644
--- a/tests/functional/leak/test_resource_leaks.py
+++ b/tests/functional/leak/test_resource_leaks.py
@@ -34,7 +34,7 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_clients')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
def test_create_memory_clients_in_loop(self):
# We need to first create clients and free then before
@@ -62,7 +62,7 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_clients')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
def test_create_single_waiter_memory_constant(self):
self.cmd('create_waiter', 's3', 'bucket_exists')
@@ -73,7 +73,7 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_waiters')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
def test_create_memory_waiters_in_loop(self):
# See ``test_create_memory_clients_in_loop`` to understand why
@@ -88,7 +88,7 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_waiters')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
def test_create_single_paginator_memory_constant(self):
self.cmd('create_paginator', 's3', 'list_objects')
@@ -99,7 +99,7 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_paginators')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
def test_create_memory_paginators_in_loop(self):
# See ``test_create_memory_clients_in_loop`` to understand why
@@ -114,4 +114,4 @@ class TestDoesNotLeakMemory(BaseClientDriverTest):
self.cmd('free_paginators')
self.record_memory()
start, end = self.memory_samples
- self.assertTrue((end - start) < self.MAX_GROWTH_BYTES, (end - start))
+ assert (end - start) < self.MAX_GROWTH_BYTES
diff --git a/tests/functional/retries/test_bucket.py b/tests/functional/retries/test_bucket.py
index 54e01fc6..9a7688b8 100644
--- a/tests/functional/retries/test_bucket.py
+++ b/tests/functional/retries/test_bucket.py
@@ -55,12 +55,12 @@ class TestTokenBucketThreading(unittest.TestCase):
token_bucket.max_rate = 100
thread.join()
end_time = time.time()
- self.assertLessEqual(end_time - start_time, 1.0 / min_rate)
+ assert end_time - start_time <= 1.0 / min_rate
def acquire_in_loop(self, token_bucket):
while not self.shutdown_threads:
try:
- self.assertTrue(token_bucket.acquire())
+ assert token_bucket.acquire()
thread_name = threading.current_thread().name
self.acquisitions_by_thread[thread_name] += 1
except Exception as e:
@@ -98,7 +98,7 @@ class TestTokenBucketThreading(unittest.TestCase):
self.shutdown_threads = True
for thread in all_threads:
thread.join()
- self.assertEqual(self.caught_exceptions, [])
+ assert self.caught_exceptions == []
distribution = self.acquisitions_by_thread.values()
mean = sum(distribution) / float(len(distribution))
# We can't really rely on any guarantees about evenly distributing
@@ -106,4 +106,4 @@ class TestTokenBucketThreading(unittest.TestCase):
# can sanity check that our implementation isn't drastically
# starving a thread. So we'll arbitrarily say that a thread
# can't have less than 20% of the mean allocations per thread.
- self.assertTrue(not any(x < (0.2 * mean) for x in distribution))
+ assert not any(x < (0.2 * mean) for x in distribution)
diff --git a/tests/functional/retries/test_quota.py b/tests/functional/retries/test_quota.py
index 00ffa24c..8d2a7a1a 100644
--- a/tests/functional/retries/test_quota.py
+++ b/tests/functional/retries/test_quota.py
@@ -38,5 +38,5 @@ class TestRetryQuota(unittest.TestCase):
for thread in threads:
thread.join()
for seen_capacity in self.seen_capacities:
- self.assertGreaterEqual(seen_capacity, 0)
- self.assertLessEqual(seen_capacity, self.max_capacity)
+ assert seen_capacity >= 0
+ assert seen_capacity <= self.max_capacity
diff --git a/tests/functional/test_apigateway.py b/tests/functional/test_apigateway.py
index b2b78acb..2bb68a50 100644
--- a/tests/functional/test_apigateway.py
+++ b/tests/functional/test_apigateway.py
@@ -33,6 +33,5 @@ class TestApiGateway(BaseSessionTest):
with self.http_stubber:
self.client.get_export(**params)
request = self.http_stubber.requests[0]
- self.assertEqual(request.method, 'GET')
- self.assertEqual(request.headers.get('Accept'),
- b'application/yaml')
+ assert request.method == 'GET'
+ assert request.headers.get('Accept') == b'application/yaml'
diff --git a/tests/functional/test_client.py b/tests/functional/test_client.py
index 4ad004df..e1c7d0d3 100644
--- a/tests/functional/test_client.py
+++ b/tests/functional/test_client.py
@@ -1,5 +1,5 @@
import unittest
-
+import pytest
import botocore
class TestCreateClients(unittest.TestCase):
@@ -11,9 +11,9 @@ class TestCreateClients(unittest.TestCase):
client = self.session.create_client('s3', region_name='us-west-2')
# We really just want to ensure create_client doesn't raise
# an exception, but we'll double check that the client looks right.
- self.assertTrue(hasattr(client, 'list_buckets'))
+ assert hasattr(client, 'list_buckets')
def test_client_raises_exception_invalid_region(self):
- with self.assertRaisesRegexp(ValueError, ('invalid region name')):
+ with pytest.raises(ValueError, match=r'invalid region name'):
self.session.create_client(
'cloudformation', region_name='invalid region name')
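
The test_client.py hunk above shows the assertion side of the migration: self.assertTrue(...) becomes a bare assert, and self.assertRaisesRegexp(...) becomes pytest.raises(..., match=...). A small sketch of that pattern with invented names (create_client here is just a stand-in, not the botocore session method):

import pytest

def create_client(region_name):
    # Stand-in that rejects region names containing spaces.
    if " " in region_name:
        raise ValueError("invalid region name: %r" % region_name)
    return object()

def test_valid_region():
    # A plain assert replaces self.assertTrue(...)
    assert create_client("us-west-2") is not None

def test_invalid_region():
    # pytest.raises(..., match=...) replaces assertRaisesRegexp; match is a
    # regex searched against the exception message.
    with pytest.raises(ValueError, match="invalid region name"):
        create_client("invalid region name")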
diff --git a/tests/functional/test_client_class_names.py b/tests/functional/test_client_class_names.py
index a52ce380..e6a393f7 100644
--- a/tests/functional/test_client_class_names.py
+++ b/tests/functional/test_client_class_names.py
@@ -72,5 +72,4 @@ class TestClientClassNames(unittest.TestCase):
session = botocore.session.get_session()
for service_name in SERVICE_TO_CLASS_NAME:
client = session.create_client(service_name, REGION)
- self.assertEqual(client.__class__.__name__,
- SERVICE_TO_CLASS_NAME[service_name])
+ assert client.__class__.__name__ == SERVICE_TO_CLASS_NAME[service_name]
diff --git a/tests/functional/test_client_metadata.py b/tests/functional/test_client_metadata.py
index a560b5a8..32e2f476 100644
--- a/tests/functional/test_client_metadata.py
+++ b/tests/functional/test_client_metadata.py
@@ -20,25 +20,25 @@ class TestClientMeta(unittest.TestCase):
def test_region_name_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
- self.assertEqual(client.meta.region_name, 'us-west-2')
+ assert client.meta.region_name == 'us-west-2'
def test_endpoint_url_on_meta(self):
client = self.session.create_client('s3', 'us-west-2',
endpoint_url='https://foo')
- self.assertEqual(client.meta.endpoint_url, 'https://foo')
+ assert client.meta.endpoint_url == 'https://foo'
def test_client_has_standard_partition_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
- self.assertEqual(client.meta.partition, 'aws')
+ assert client.meta.partition == 'aws'
def test_client_has_china_partition_on_meta(self):
client = self.session.create_client('s3', 'cn-north-1')
- self.assertEqual(client.meta.partition, 'aws-cn')
+ assert client.meta.partition == 'aws-cn'
def test_client_has_gov_partition_on_meta(self):
client = self.session.create_client('s3', 'us-gov-west-1')
- self.assertEqual(client.meta.partition, 'aws-us-gov')
+ assert client.meta.partition == 'aws-us-gov'
def test_client_has_no_partition_on_meta_if_custom_region(self):
client = self.session.create_client('s3', 'myregion')
- self.assertEqual(client.meta.partition, 'aws')
+ assert client.meta.partition == 'aws'
diff --git a/tests/functional/test_cloudsearchdomain.py b/tests/functional/test_cloudsearchdomain.py
index a39919b5..afc5035d 100644
--- a/tests/functional/test_cloudsearchdomain.py
+++ b/tests/functional/test_cloudsearchdomain.py
@@ -26,7 +26,7 @@ class TestCloudsearchdomain(BaseSessionTest):
with self.http_stubber:
self.client.search(query='foo')
request = self.http_stubber.requests[0]
- self.assertIn('q=foo', request.body)
- self.assertEqual(request.method, 'POST')
+ assert 'q=foo' in request.body
+ assert request.method == 'POST'
content_type = b'application/x-www-form-urlencoded'
- self.assertEqual(request.headers.get('Content-Type'), content_type)
+ assert request.headers.get('Content-Type') == content_type
diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py
index 7c216237..3be4616a 100644
--- a/tests/functional/test_cognito_idp.py
+++ b/tests/functional/test_cognito_idp.py
@@ -10,77 +10,79 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from tests import mock
from tests import create_session, ClientHTTPStubber
-def test_unsigned_operations():
- operation_params = {
- 'change_password': {
- 'PreviousPassword': 'myoldbadpassword',
- 'ProposedPassword': 'mynewgoodpassword',
- 'AccessToken': 'foobar'
- },
- 'confirm_forgot_password': {
- 'ClientId': 'foo',
- 'Username': 'myusername',
- 'ConfirmationCode': 'thisismeforreal',
- 'Password': 'whydowesendpasswordsviaemail'
- },
- 'confirm_sign_up': {
- 'ClientId': 'foo',
- 'Username': 'myusername',
- 'ConfirmationCode': 'ireallydowanttosignup'
- },
- 'delete_user': {
- 'AccessToken': 'foobar'
- },
- 'delete_user_attributes': {
- 'UserAttributeNames': ['myattribute'],
- 'AccessToken': 'foobar'
- },
- 'forgot_password': {
- 'ClientId': 'foo',
- 'Username': 'myusername'
- },
- 'get_user': {
- 'AccessToken': 'foobar'
- },
- 'get_user_attribute_verification_code': {
- 'AttributeName': 'myattribute',
- 'AccessToken': 'foobar'
- },
- 'resend_confirmation_code': {
- 'ClientId': 'foo',
- 'Username': 'myusername'
- },
- 'set_user_settings': {
- 'AccessToken': 'randomtoken',
- 'MFAOptions': [{
- 'DeliveryMedium': 'SMS',
- 'AttributeName': 'someattributename'
- }]
- },
- 'sign_up': {
- 'ClientId': 'foo',
- 'Username': 'bar',
- 'Password': 'mysupersecurepassword',
- },
- 'update_user_attributes': {
- 'UserAttributes': [{
- 'Name': 'someattributename',
- 'Value': 'newvalue'
- }],
- 'AccessToken': 'foobar'
- },
- 'verify_user_attribute': {
- 'AttributeName': 'someattributename',
- 'Code': 'someverificationcode',
- 'AccessToken': 'foobar'
- },
- }
+OPERATION_PARAMS = {
+ 'change_password': {
+ 'PreviousPassword': 'myoldbadpassword',
+ 'ProposedPassword': 'mynewgoodpassword',
+ 'AccessToken': 'foobar'
+ },
+ 'confirm_forgot_password': {
+ 'ClientId': 'foo',
+ 'Username': 'myusername',
+ 'ConfirmationCode': 'thisismeforreal',
+ 'Password': 'whydowesendpasswordsviaemail'
+ },
+ 'confirm_sign_up': {
+ 'ClientId': 'foo',
+ 'Username': 'myusername',
+ 'ConfirmationCode': 'ireallydowanttosignup'
+ },
+ 'delete_user': {
+ 'AccessToken': 'foobar'
+ },
+ 'delete_user_attributes': {
+ 'UserAttributeNames': ['myattribute'],
+ 'AccessToken': 'foobar'
+ },
+ 'forgot_password': {
+ 'ClientId': 'foo',
+ 'Username': 'myusername'
+ },
+ 'get_user': {
+ 'AccessToken': 'foobar'
+ },
+ 'get_user_attribute_verification_code': {
+ 'AttributeName': 'myattribute',
+ 'AccessToken': 'foobar'
+ },
+ 'resend_confirmation_code': {
+ 'ClientId': 'foo',
+ 'Username': 'myusername'
+ },
+ 'set_user_settings': {
+ 'AccessToken': 'randomtoken',
+ 'MFAOptions': [{
+ 'DeliveryMedium': 'SMS',
+ 'AttributeName': 'someattributename'
+ }]
+ },
+ 'sign_up': {
+ 'ClientId': 'foo',
+ 'Username': 'bar',
+ 'Password': 'mysupersecurepassword',
+ },
+ 'update_user_attributes': {
+ 'UserAttributes': [{
+ 'Name': 'someattributename',
+ 'Value': 'newvalue'
+ }],
+ 'AccessToken': 'foobar'
+ },
+ 'verify_user_attribute': {
+ 'AttributeName': 'someattributename',
+ 'Code': 'someverificationcode',
+ 'AccessToken': 'foobar'
+ },
+}
+@pytest.mark.parametrize("operation_name, params", OPERATION_PARAMS.items())
+def test_unsigned_operations(operation_name, params):
environ = {
'AWS_ACCESS_KEY_ID': 'access_key',
'AWS_SECRET_ACCESS_KEY': 'secret_key',
@@ -91,25 +93,13 @@ def test_unsigned_operations():
session = create_session()
session.config_filename = 'no-exist-foo'
client = session.create_client('cognito-idp', 'us-west-2')
+ http_stubber = ClientHTTPStubber(client)
+ operation = getattr(client, operation_name)
+ http_stubber.add_response(body=b'{}')
- for operation, params in operation_params.items():
- UnsignedOperationTestCase(client, operation, params).run()
-
-
-class UnsignedOperationTestCase(object):
- def __init__(self, client, operation_name, parameters):
- self._client = client
- self._operation_name = operation_name
- self._parameters = parameters
- self._http_stubber = ClientHTTPStubber(self._client)
-
- def run(self):
- operation = getattr(self._client, self._operation_name)
-
- self._http_stubber.add_response(body=b'{}')
- with self._http_stubber:
- operation(**self._parameters)
- request = self._http_stubber.requests[0]
+ with http_stubber:
+ operation(**params)
+ request = http_stubber.requests[0]
assert 'authorization' not in request.headers, \
'authorization header found in unsigned operation'
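
The cognito-idp change above hoists the per-operation parameters into a module-level OPERATION_PARAMS dict and parametrizes over its items(), so each (operation_name, params) pair runs as an independent test instead of being driven by a helper class. A generic sketch of that two-argument parametrization, with placeholder data rather than the real operation table:

import pytest

OPERATIONS = {
    "op_a": {"Foo": 1},
    "op_b": {"Bar": 2},
}

# items() yields (name, params) pairs; listing two argnames unpacks each pair
# into separate fixtures, so every mapping entry is reported on its own.
@pytest.mark.parametrize("name, params", OPERATIONS.items())
def test_each_operation(name, params):
    assert isinstance(name, str)
    assert isinstance(params, dict)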
diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py
index bee2b540..8f4a0967 100644
--- a/tests/functional/test_credentials.py
+++ b/tests/functional/test_credentials.py
@@ -20,6 +20,7 @@ import tempfile
import shutil
from datetime import datetime, timedelta
import sys
+import pytest
from dateutil.tz import tzlocal
from botocore.exceptions import CredentialRetrievalError
@@ -68,13 +69,13 @@ class TestCredentialRefreshRaces(unittest.TestCase):
# from the first refresh ('1'), the secret key from
# the second refresh ('2'), and the token from the
# first refresh ('1').
- self.assertTrue(creds[0] == creds[1] == creds[2], creds)
+ assert creds[0] == creds[1] == creds[2], creds
def assert_non_none_retrieved_credentials(self, func):
collected = []
self._run_threads(50, func, collected)
for cred in collected:
- self.assertIsNotNone(cred)
+ assert cred is not None
def _run_threads(self, num_threads, func, collected):
threads = []
@@ -104,11 +105,9 @@ class TestCredentialRefreshRaces(unittest.TestCase):
# So, for example, if execution time took 6.1 seconds, then
# we should see a maximum number of refreshes being (6 / 2.0) + 1 = 4
max_calls_allowed = math.ceil((end - start) / 2.0) + 1
- self.assertTrue(creds.refresh_counter <= max_calls_allowed,
- "Too many cred refreshes, max: %s, actual: %s, "
- "time_delta: %.4f" % (max_calls_allowed,
- creds.refresh_counter,
- (end - start)))
+ msg = "Too many cred refreshes, max: %s, actual: %s, time_delta: %.4f"
+ assert creds.refresh_counter <= max_calls_allowed, msg % (
+ max_calls_allowed, creds.refresh_counter, (end - start))
def test_no_race_for_immediate_advisory_expiration(self):
creds = IntegerRefresher(
@@ -194,7 +193,7 @@ class BaseAssumeRoleTest(BaseEnvVar):
c2_frozen = c2
if not isinstance(c2_frozen, ReadOnlyCredentials):
c2_frozen = c2.get_frozen_credentials()
- self.assertEqual(c1_frozen, c2_frozen)
+ assert c1_frozen == c2_frozen
def write_config(self, config):
with open(self.config_file, 'w') as f:
@@ -319,7 +318,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
- self.assertEqual(self.env_provider.load.call_count, 1)
+ assert self.env_provider.load.call_count == 1
def test_instance_metadata_credential_source(self):
config = (
@@ -341,7 +340,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
- self.assertEqual(self.metadata_provider.load.call_count, 1)
+ assert self.metadata_provider.load.call_count == 1
def test_container_credential_source(self):
config = (
@@ -363,7 +362,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
self.assert_creds_equal(actual_creds, expected_creds)
stubber.assert_no_pending_responses()
- self.assertEqual(self.container_provider.load.call_count, 1)
+ assert self.container_provider.load.call_count == 1
def test_invalid_credential_source(self):
config = (
@@ -373,7 +372,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
)
self.write_config(config)
- with self.assertRaises(InvalidConfigError):
+ with pytest.raises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
@@ -387,7 +386,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
)
self.write_config(config)
- with self.assertRaises(InvalidConfigError):
+ with pytest.raises(InvalidConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials().get_frozen_credentials()
@@ -451,7 +450,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
)
self.write_config(config)
- with self.assertRaises(InfiniteLoopConfigError):
+ with pytest.raises(InfiniteLoopConfigError):
session, _ = self.create_session(profile='A')
session.get_credentials()
@@ -475,14 +474,14 @@ class TestAssumeRole(BaseAssumeRoleTest):
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# credential process.
- self.assertEqual(self.mock_client_creator.call_count, 1)
+ assert self.mock_client_creator.call_count == 1
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': 'spam',
'aws_secret_access_key': 'eggs',
'aws_session_token': None,
}
- self.assertEqual(kwargs, expected_kwargs)
+ assert kwargs == expected_kwargs
def test_web_identity_source_profile(self):
token_path = os.path.join(self.tempdir, 'token')
@@ -516,14 +515,14 @@ class TestAssumeRole(BaseAssumeRoleTest):
stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# assume role with web identity call.
- self.assertEqual(self.mock_client_creator.call_count, 1)
+ assert self.mock_client_creator.call_count == 1
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': identity_creds.access_key,
'aws_secret_access_key': identity_creds.secret_key,
'aws_session_token': identity_creds.token,
}
- self.assertEqual(kwargs, expected_kwargs)
+ assert kwargs == expected_kwargs
def test_web_identity_source_profile_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
@@ -543,7 +542,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
# The config is split between the profile and the env, we
# should only be looking at the profile so this should raise
# a configuration error.
- with self.assertRaises(InvalidConfigError):
+ with pytest.raises(InvalidConfigError):
session.get_credentials()
def test_sso_source_profile(self):
@@ -596,14 +595,14 @@ class TestAssumeRole(BaseAssumeRoleTest):
sts_stubber.assert_no_pending_responses()
# Assert that the client was created with the credentials from the
# SSO get role credentials response
- self.assertEqual(self.mock_client_creator.call_count, 1)
+ assert self.mock_client_creator.call_count == 1
_, kwargs = self.mock_client_creator.call_args_list[0]
expected_kwargs = {
'aws_access_key_id': sso_role_creds.access_key,
'aws_secret_access_key': sso_role_creds.secret_key,
'aws_session_token': sso_role_creds.token,
}
- self.assertEqual(kwargs, expected_kwargs)
+ assert kwargs == expected_kwargs
def test_web_identity_credential_source_ignores_env_vars(self):
token_path = os.path.join(self.tempdir, 'token')
@@ -623,7 +622,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
# environment when the Environment credential_source is set.
# There are no Environment credentials, so this should raise a
# retrieval error.
- with self.assertRaises(CredentialRetrievalError):
+ with pytest.raises(CredentialRetrievalError):
session.get_credentials()
def test_self_referential_profile(self):
@@ -682,7 +681,7 @@ class TestAssumeRole(BaseAssumeRoleTest):
provider = resolver.get_provider('assume-role')
creds = provider.load()
self.assert_creds_equal(creds, expected_creds)
- self.assertEqual(self.actual_client_region, 'cn-north-1')
+ assert self.actual_client_region == 'cn-north-1'
class TestAssumeRoleWithWebIdentity(BaseAssumeRoleTest):
@@ -795,8 +794,8 @@ class TestProcessProvider(unittest.TestCase):
self.environ['AWS_CONFIG_FILE'] = f.name
credentials = Session(profile='processcreds').get_credentials()
- self.assertEqual(credentials.access_key, 'spam')
- self.assertEqual(credentials.secret_key, 'eggs')
+ assert credentials.access_key == 'spam'
+ assert credentials.secret_key == 'eggs'
def test_credential_process_returns_error(self):
config = (
@@ -825,7 +824,7 @@ class TestProcessProvider(unittest.TestCase):
# Finally `(?s)` at the beginning makes dots match newlines so
# we can handle a multi-line string.
reg = r"(?s)^((?!b').)*$"
- with six.assertRaisesRegex(self, CredentialRetrievalError, reg):
+ with pytest.raises(CredentialRetrievalError, match=reg):
session.get_credentials()
@@ -883,10 +882,7 @@ class TestSTSRegional(BaseAssumeRoleTest):
# endpoint.
self.make_stubbed_client_call_to_region(
session, stubber, 'us-west-2')
- self.assertEqual(
- stubber.requests[0].url,
- 'https://sts.us-west-2.amazonaws.com/'
- )
+ assert stubber.requests[0].url == 'https://sts.us-west-2.amazonaws.com/'
def test_assume_role_web_identity_uses_same_region_as_client(self):
token_file = os.path.join(self.tempdir, 'token.jwt')
@@ -914,7 +910,4 @@ class TestSTSRegional(BaseAssumeRoleTest):
# endpoint.
self.make_stubbed_client_call_to_region(
session, stubber, 'us-west-2')
- self.assertEqual(
- stubber.requests[0].url,
- 'https://sts.us-west-2.amazonaws.com/'
- )
+ assert stubber.requests[0].url == 'https://sts.us-west-2.amazonaws.com/'
\ No newline at end of file
diff --git a/tests/functional/test_discovery.py b/tests/functional/test_discovery.py
index eaae3464..ad5e905e 100644
--- a/tests/functional/test_discovery.py
+++ b/tests/functional/test_discovery.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
+import pytest
from tests import ClientHTTPStubber, temporary_file
from tests.functional import FunctionalSessionTest
@@ -63,19 +64,16 @@ class TestEndpointDiscovery(FunctionalSessionTest):
self.environ['AWS_CONFIG_FILE'] = fileobj.name
def assert_endpoint_discovery_used(self, stubber, discovered_endpoint):
- self.assertEqual(len(stubber.requests), 2)
+ assert len(stubber.requests) == 2
discover_request = stubber.requests[1]
- self.assertEqual(discover_request.url, discovered_endpoint)
+ assert discover_request.url == discovered_endpoint
def assert_discovery_skipped(self, stubber, operation):
- self.assertEqual(len(stubber.requests), 1)
- self.assertEqual(
- stubber.requests[0].headers.get('X-Amz-Target'),
- operation
- )
+ assert len(stubber.requests) == 1
+ assert stubber.requests[0].headers.get('X-Amz-Target') == operation
def assert_endpoint_used(self, actual_url, expected_url):
- self.assertEqual(actual_url, expected_url)
+ assert actual_url == expected_url
def test_endpoint_discovery_enabled(self):
discovered_endpoint = 'https://discovered.domain'
@@ -100,7 +98,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
client, http_stubber = self.create_client(config=config)
with http_stubber as stubber:
stubber.add_response(status=421, body=response_body)
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
client.describe_table(TableName='sometable')
def test_endpoint_discovery_disabled(self):
@@ -109,14 +107,14 @@ class TestEndpointDiscovery(FunctionalSessionTest):
with http_stubber as stubber:
stubber.add_response(status=200, body=b'{}')
client.describe_table(TableName='sometable')
- self.assertEqual(len(stubber.requests), 1)
+ assert len(stubber.requests) == 1
def test_endpoint_discovery_no_config_default(self):
client, http_stubber = self.create_client()
with http_stubber as stubber:
stubber.add_response(status=200, body=b'{}')
client.describe_table(TableName='sometable')
- self.assertEqual(len(stubber.requests), 1)
+ assert len(stubber.requests) == 1
def test_endpoint_discovery_default_required_endpoint(self):
discovered_endpoint = "https://discovered.domain"
@@ -144,7 +142,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
service_name="test-discovery-endpoint", config=config
)
self.add_describe_endpoints_response(http_stubber, discovered_endpoint)
- with self.assertRaises(EndpointDiscoveryRequired):
+ with pytest.raises(EndpointDiscoveryRequired):
client.test_discovery_required(Foo="bar")
def test_endpoint_discovery_required_with_custom_endpoint(self):
@@ -231,7 +229,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
with http_stubber as stubber:
stubber.add_response(status=200, body=b'{}')
client.test_discovery_optional(Foo="bar")
- self.assertEqual(len(stubber.requests), 1)
+ assert len(stubber.requests) == 1
def test_endpoint_discovery_enabled_optional_endpoint(self):
discovered_endpoint = 'https://discovered.domain'
@@ -257,7 +255,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
def test_endpoint_discovery_enabled_with_random_string(self):
config = Config(endpoint_discovery_enabled="bad value")
- with self.assertRaises(InvalidEndpointDiscoveryConfigurationError):
+ with pytest.raises(InvalidEndpointDiscoveryConfigurationError):
client, http_stubber = self.create_client(
service_name="test-discovery-endpoint", config=config
)
@@ -304,7 +302,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
service_name="test-discovery-endpoint"
)
self.add_describe_endpoints_response(http_stubber, discovered_endpoint)
- with self.assertRaises(EndpointDiscoveryRequired):
+ with pytest.raises(EndpointDiscoveryRequired):
client.test_discovery_required(Foo="bar")
def test_endpoint_discovery_with_config_file_enabled(self):
@@ -339,7 +337,7 @@ class TestEndpointDiscovery(FunctionalSessionTest):
service_name="test-discovery-endpoint"
)
self.add_describe_endpoints_response(http_stubber, discovered_endpoint)
- with self.assertRaises(EndpointDiscoveryRequired):
+ with pytest.raises(EndpointDiscoveryRequired):
client.test_discovery_required(Foo="bar")
def test_endpoint_discovery_with_config_file_auto(self):
diff --git a/tests/functional/test_docdb.py b/tests/functional/test_docdb.py
index 1408b0a7..cc06d80c 100644
--- a/tests/functional/test_docdb.py
+++ b/tests/functional/test_docdb.py
@@ -21,8 +21,8 @@ class TestDocDBPresignUrlInjection(BaseSessionTest):
self.http_stubber = ClientHTTPStubber(self.client)
def assert_presigned_url_injected_in_request(self, body):
- self.assertIn('PreSignedUrl', body)
- self.assertNotIn('SourceRegion', body)
+ assert 'PreSignedUrl' in body
+ assert 'SourceRegion' not in body
def test_create_db_cluster(self):
params = {
diff --git a/tests/functional/test_dynamodb.py b/tests/functional/test_dynamodb.py
index 1cd3e82f..8d9011af 100644
--- a/tests/functional/test_dynamodb.py
+++ b/tests/functional/test_dynamodb.py
@@ -42,9 +42,9 @@ class TestDynamoDBEndpointDiscovery(BaseSessionTest):
stubber.add_response(status=200, body=response_body)
stubber.add_response(status=200, body=b'{}')
self.client.describe_table(TableName='sometable')
- self.assertEqual(len(self.http_stubber.requests), 2)
+ assert len(self.http_stubber.requests) == 2
discover_request = self.http_stubber.requests[1]
- self.assertEqual(discover_request.url, discovered_endpoint)
+ assert discover_request.url == discovered_endpoint
def test_dynamodb_endpoint_discovery_disabled(self):
self.config = Config(endpoint_discovery_enabled=False)
@@ -52,7 +52,7 @@ class TestDynamoDBEndpointDiscovery(BaseSessionTest):
with self.http_stubber as stubber:
stubber.add_response(status=200, body=b'{}')
self.client.describe_table(TableName='sometable')
- self.assertEqual(len(self.http_stubber.requests), 1)
+ assert len(self.http_stubber.requests) == 1
def test_dynamodb_endpoint_discovery_no_config_default(self):
self.config = None
@@ -60,4 +60,4 @@ class TestDynamoDBEndpointDiscovery(BaseSessionTest):
with self.http_stubber as stubber:
stubber.add_response(status=200, body=b'{}')
self.client.describe_table(TableName='sometable')
- self.assertEqual(len(self.http_stubber.requests), 1)
+ assert len(self.http_stubber.requests) == 1
diff --git a/tests/functional/test_ec2.py b/tests/functional/test_ec2.py
index 50728e73..c80d4345 100644
--- a/tests/functional/test_ec2.py
+++ b/tests/functional/test_ec2.py
@@ -54,7 +54,7 @@ class TestIdempotencyToken(unittest.TestCase):
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}],
ClientToken='foobar')
- self.assertIn('ClientToken', self.params_seen)
+ assert 'ClientToken' in self.params_seen
def test_insert_idempotency_token(self):
expected_params = {
@@ -70,7 +70,7 @@ class TestIdempotencyToken(unittest.TestCase):
self.client.purchase_scheduled_instances(
PurchaseRequests=[{'PurchaseToken': 'foo',
'InstanceCount': 123}])
- self.assertIn('ClientToken', self.params_seen)
+ assert 'ClientToken' in self.params_seen
class TestCopySnapshotCustomization(BaseSessionTest):
@@ -109,27 +109,27 @@ class TestCopySnapshotCustomization(BaseSessionTest):
SourceRegion='us-west-2',
SourceSnapshotId=self.snapshot_id,
)
- self.assertEqual(result['SnapshotId'], self.snapshot_id)
- self.assertEqual(len(self.http_stubber.requests), 1)
+ assert result['SnapshotId'] == self.snapshot_id
+ assert len(self.http_stubber.requests) == 1
snapshot_request = self.http_stubber.requests[0]
body = parse_qs(snapshot_request.body)
- self.assertIn('PresignedUrl', body)
+ assert 'PresignedUrl' in body
presigned_url = urlparse(body['PresignedUrl'][0])
- self.assertEqual(presigned_url.scheme, 'https')
- self.assertEqual(presigned_url.netloc, 'ec2.us-west-2.amazonaws.com')
+ assert presigned_url.scheme == 'https'
+ assert presigned_url.netloc == 'ec2.us-west-2.amazonaws.com'
query_args = parse_qs(presigned_url.query)
- self.assertEqual(query_args['Action'], ['CopySnapshot'])
- self.assertEqual(query_args['Version'], ['2016-11-15'])
- self.assertEqual(query_args['SourceRegion'], ['us-west-2'])
- self.assertEqual(query_args['DestinationRegion'], ['us-east-1'])
- self.assertEqual(query_args['SourceSnapshotId'], [self.snapshot_id])
- self.assertEqual(query_args['X-Amz-Algorithm'], ['AWS4-HMAC-SHA256'])
+ assert query_args['Action'] == ['CopySnapshot']
+ assert query_args['Version'] == ['2016-11-15']
+ assert query_args['SourceRegion'] == ['us-west-2']
+ assert query_args['DestinationRegion'] == ['us-east-1']
+ assert query_args['SourceSnapshotId'] == [self.snapshot_id]
+ assert query_args['X-Amz-Algorithm'] == ['AWS4-HMAC-SHA256']
expected_credential = 'access_key/20110909/us-west-2/ec2/aws4_request'
- self.assertEqual(query_args['X-Amz-Credential'], [expected_credential])
- self.assertEqual(query_args['X-Amz-Date'], ['20110909T233600Z'])
- self.assertEqual(query_args['X-Amz-Expires'], ['3600'])
- self.assertEqual(query_args['X-Amz-SignedHeaders'], ['host'])
+ assert query_args['X-Amz-Credential'] == [expected_credential]
+ assert query_args['X-Amz-Date'] == ['20110909T233600Z']
+ assert query_args['X-Amz-Expires'] == ['3600']
+ assert query_args['X-Amz-SignedHeaders'] == ['host']
expected_signature = (
'a94a6b52afdf3daa34c2e2a38a62b72c8dac129c9904c61aa1a5d86e38628537'
)
- self.assertEqual(query_args['X-Amz-Signature'], [expected_signature])
+ assert query_args['X-Amz-Signature'] == [expected_signature]
diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py
index b821f4d9..ce1ecccb 100644
--- a/tests/functional/test_event_alias.py
+++ b/tests/functional/test_event_alias.py
@@ -1,3 +1,4 @@
+import pytest
from botocore.session import Session
@@ -577,15 +578,14 @@ SERVICES = {
}
}
-
-def test_event_alias():
- for client_name in SERVICES.keys():
- endpoint_prefix = SERVICES[client_name].get('endpoint_prefix')
- service_id = SERVICES[client_name]['service_id']
- if endpoint_prefix is not None:
- yield _assert_handler_called, client_name, endpoint_prefix
- _assert_handler_called(client_name, service_id)
- _assert_handler_called(client_name, client_name)
+@pytest.mark.parametrize("client_name, metadata", SERVICES.items())
+def test_event_alias(client_name, metadata):
+ endpoint_prefix = metadata.get('endpoint_prefix')
+ service_id = metadata['service_id']
+ if endpoint_prefix is not None:
+ _assert_handler_called(client_name, endpoint_prefix)
+ _assert_handler_called(client_name, service_id)
+ _assert_handler_called(client_name, client_name)
def _assert_handler_called(client_name, event_part):
diff --git a/tests/functional/test_events.py b/tests/functional/test_events.py
index 69250e26..dffaf48a 100644
--- a/tests/functional/test_events.py
+++ b/tests/functional/test_events.py
@@ -10,6 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from tests import mock, BaseSessionTest
@@ -42,24 +43,22 @@ class TestClientEvents(BaseSessionTest):
mock_send.return_value = mock.Mock(
status_code=200, headers={}, content=response_body)
self.client.describe_regions()
- self.assertEqual(
- recording_handler.recorded_events,
- [
- ('response-received.ec2.DescribeRegions',
- {
- 'exception': None,
- 'response_dict': {
- 'body': response_body,
- 'headers': {},
- 'context': mock.ANY,
- 'status_code': 200
- },
- 'parsed_response': {
- 'ResponseMetadata': mock.ANY},
- 'context': mock.ANY
- })
- ]
- )
+ expected_events = [
+ ('response-received.ec2.DescribeRegions',
+ {
+ 'exception': None,
+ 'response_dict': {
+ 'body': response_body,
+ 'headers': {},
+ 'context': mock.ANY,
+ 'status_code': 200
+ },
+ 'parsed_response': {
+ 'ResponseMetadata': mock.ANY},
+ 'context': mock.ANY
+ })
+ ]
+ assert recording_handler.recorded_events == expected_events
def test_emit_response_received_for_exception(self):
recording_handler = RecordingHandler()
@@ -69,11 +68,9 @@ class TestClientEvents(BaseSessionTest):
'botocore.httpsession.URLLib3Session.send') as mock_send:
raised_exception = RuntimeError('Unexpected exception')
mock_send.side_effect = raised_exception
- with self.assertRaises(RuntimeError):
+ with pytest.raises(RuntimeError):
self.client.describe_regions()
- self.assertEqual(
- recording_handler.recorded_events,
- [
+ expected_events = [
('response-received.ec2.DescribeRegions',
{
'exception': raised_exception,
@@ -81,5 +78,5 @@ class TestClientEvents(BaseSessionTest):
'parsed_response': None,
'context': mock.ANY
})
- ]
- )
+ ]
+ assert recording_handler.recorded_events == expected_events
diff --git a/tests/functional/test_history.py b/tests/functional/test_history.py
index e645ac73..8ed68a69 100644
--- a/tests/functional/test_history.py
+++ b/tests/functional/test_history.py
@@ -49,15 +49,15 @@ class TestRecordStatementsInjections(BaseSessionTest):
self.client.list_buckets()
api_call_events = self._get_all_events_of_type('API_CALL')
- self.assertEqual(len(api_call_events), 1)
+ assert len(api_call_events) == 1
event = api_call_events[0]
event_type, payload, source = event
- self.assertEqual(payload, {
+ assert payload == {
'operation': u'ListBuckets',
'params': {},
'service': 's3'
- })
- self.assertEqual(source, 'BOTOCORE')
+ }
+ assert source == 'BOTOCORE'
def test_does_record_http_request(self):
self.http_stubber.add_response(body=self.s3_response_body)
@@ -65,30 +65,30 @@ class TestRecordStatementsInjections(BaseSessionTest):
self.client.list_buckets()
http_request_events = self._get_all_events_of_type('HTTP_REQUEST')
- self.assertEqual(len(http_request_events), 1)
+ assert len(http_request_events) == 1
event = http_request_events[0]
event_type, payload, source = event
method = payload['method']
- self.assertEqual(method, u'GET')
+ assert method == u'GET'
# The header values vary too much per request to verify them here.
# Instead just check the presense of each expected header.
headers = payload['headers']
for expected_header in ['Authorization', 'User-Agent', 'X-Amz-Date',
'X-Amz-Content-SHA256']:
- self.assertIn(expected_header, headers)
+ assert expected_header in headers
body = payload['body']
- self.assertIsNone(body)
+ assert body is None
streaming = payload['streaming']
- self.assertEqual(streaming, False)
+ assert streaming == False
url = payload['url']
- self.assertEqual(url, 'https://s3.us-west-2.amazonaws.com/')
+ assert url == 'https://s3.us-west-2.amazonaws.com/'
- self.assertEqual(source, 'BOTOCORE')
+ assert source == 'BOTOCORE'
def test_does_record_http_response(self):
self.http_stubber.add_response(body=self.s3_response_body)
@@ -96,19 +96,18 @@ class TestRecordStatementsInjections(BaseSessionTest):
self.client.list_buckets()
http_response_events = self._get_all_events_of_type('HTTP_RESPONSE')
- self.assertEqual(len(http_response_events), 1)
+ assert len(http_response_events) == 1
event = http_response_events[0]
event_type, payload, source = event
- self.assertEqual(payload, {
+ assert payload == {
'status_code': 200,
'headers': {},
'streaming': False,
'body': self.s3_response_body,
'context': {'operation_name': 'ListBuckets'}
}
- )
- self.assertEqual(source, 'BOTOCORE')
+ assert source == 'BOTOCORE'
def test_does_record_parsed_response(self):
self.http_stubber.add_response(body=self.s3_response_body)
@@ -117,7 +116,7 @@ class TestRecordStatementsInjections(BaseSessionTest):
parsed_response_events = self._get_all_events_of_type(
'PARSED_RESPONSE')
- self.assertEqual(len(parsed_response_events), 1)
+ assert len(parsed_response_events) == 1
event = parsed_response_events[0]
event_type, payload, source = event
@@ -126,19 +125,19 @@ class TestRecordStatementsInjections(BaseSessionTest):
# assert the interesting bits since mock can only assert if the args
# all match exactly.
owner = payload['Owner']
- self.assertEqual(owner, {
+ assert owner == {
'DisplayName': 'foo',
'ID': 'd41d8cd98f00b204e9800998ecf8427e'
- })
+ }
buckets = payload['Buckets']
- self.assertEqual(len(buckets), 1)
+ assert len(buckets) == 1
bucket = buckets[0]
- self.assertEqual(bucket['Name'], 'bar')
+ assert bucket['Name'] == 'bar'
metadata = payload['ResponseMetadata']
- self.assertEqual(metadata, {
+ assert metadata == {
'HTTPHeaders': {},
'HTTPStatusCode': 200,
'RetryAttempts': 0
- })
+ }
diff --git a/tests/functional/test_iot_data.py b/tests/functional/test_iot_data.py
index cb3c2bc9..89f0760d 100644
--- a/tests/functional/test_iot_data.py
+++ b/tests/functional/test_iot_data.py
@@ -24,11 +24,11 @@ class TestOpensslVersion(BaseSessionTest):
warning_message = call_args[0]
warning_type = call_args[1]
# We should say something specific about the service.
- self.assertIn('iot-data', warning_message)
- self.assertEqual(warning_type, UnsupportedTLSVersionWarning)
+ assert 'iot-data' in warning_message
+ assert warning_type == UnsupportedTLSVersionWarning
def test_compatible_openssl_version(self):
with mock.patch('ssl.OPENSSL_VERSION_INFO', new=(1, 0, 1, 1, 1)):
with mock.patch('warnings.warn') as mock_warn:
self.session.create_client('iot-data', 'us-east-1')
- self.assertFalse(mock_warn.called)
+ assert mock_warn.called is False
diff --git a/tests/functional/test_kinesis.py b/tests/functional/test_kinesis.py
index 26317956..33773e8f 100644
--- a/tests/functional/test_kinesis.py
+++ b/tests/functional/test_kinesis.py
@@ -32,7 +32,7 @@ class TestKinesisListStreams(BaseSessionTest):
decoded_str = b64decode(encoded_str).decode("utf-8")
except UnicodeDecodeError:
self.fail("Base64 encoded record is not a valid utf-8 string")
- self.assertEqual(decoded_str, expected_value)
+ assert decoded_str == expected_value
def test_can_put_stream_blob(self):
unique_data = str(uuid4())
@@ -40,10 +40,10 @@ class TestKinesisListStreams(BaseSessionTest):
self.client.put_record(
StreamName=self.stream_name, PartitionKey="foo", Data=unique_data
)
- self.assertEqual(len(stub.requests), 1)
+ assert len(stub.requests) == 1
request = json.loads(stub.requests[0].body.decode("utf-8"))
- self.assertEqual(request["StreamName"], self.stream_name)
- self.assertEqual(request["PartitionKey"], "foo")
+ assert request["StreamName"] == self.stream_name
+ assert request["PartitionKey"] == "foo"
self.assert_base64encoded_str_equals(
request["Data"], unique_data
)
@@ -55,13 +55,13 @@ class TestKinesisListStreams(BaseSessionTest):
StreamName=self.stream_name,
Records=[{"Data": unique_data, "PartitionKey": "foo"}],
)
- self.assertEqual(len(stub.requests), 1)
+ assert len(stub.requests) == 1
request = json.loads(stub.requests[0].body.decode("utf-8"))
- self.assertEqual(len(request["Records"]), 1)
- self.assertEqual(request["StreamName"], self.stream_name)
+ assert len(request["Records"]) == 1
+ assert request["StreamName"] == self.stream_name
record = request["Records"][0]
- self.assertEqual(record["PartitionKey"], "foo")
+ assert record["PartitionKey"] == "foo"
self.assert_base64encoded_str_equals(
record["Data"], unique_data
)
@@ -75,9 +75,9 @@ class TestKinesisListStreams(BaseSessionTest):
{"Data": "barfoo", "PartitionKey": "foo"},
],
)
- self.assertEqual(len(stub.requests), 1)
+ assert len(stub.requests) == 1
request = json.loads(stub.requests[0].body.decode("utf-8"))
- self.assertEqual(len(request["Records"]), 2)
+ assert len(request["Records"]) == 2
record_foobar = request["Records"][0]
record_barfoo = request["Records"][1]
diff --git a/tests/functional/test_lex.py b/tests/functional/test_lex.py
index ff152427..015e1ad8 100644
--- a/tests/functional/test_lex.py
+++ b/tests/functional/test_lex.py
@@ -54,7 +54,7 @@ class TestLex(BaseSessionTest):
b' Signature='
b'7f93fde5c36163dce6ee116fcfebab13474ab903782fea04c00bb1dedc3fc4cc'
)
- self.assertEqual(authorization, expected_authorization)
+ assert authorization == expected_authorization
content_header = request.headers.get('x-amz-content-sha256')
- self.assertEqual(content_header, b'UNSIGNED-PAYLOAD')
+ assert content_header == b'UNSIGNED-PAYLOAD'
diff --git a/tests/functional/test_loaders.py b/tests/functional/test_loaders.py
index 97b9ab9e..aa30ec1a 100644
--- a/tests/functional/test_loaders.py
+++ b/tests/functional/test_loaders.py
@@ -37,4 +37,4 @@ class TestLoaderAllowsDataPathOverride(unittest.TestCase):
new_content = loader.load_data('_retry')
# This should contain the content we just created.
- self.assertEqual(new_content, {"foo": "bar"})
+ assert new_content == {"foo": "bar"}
diff --git a/tests/functional/test_machinelearning.py b/tests/functional/test_machinelearning.py
index d96fb4af..bde6f9a3 100644
--- a/tests/functional/test_machinelearning.py
+++ b/tests/functional/test_machinelearning.py
@@ -31,4 +31,4 @@ class TestMachineLearning(BaseSessionTest):
PredictEndpoint=custom_endpoint
)
sent_request = self.http_stubber.requests[0]
- self.assertEqual(sent_request.url, custom_endpoint)
+ assert sent_request.url == custom_endpoint
diff --git a/tests/functional/test_model_completeness.py b/tests/functional/test_model_completeness.py
index 484ee23a..365db9bc 100644
--- a/tests/functional/test_model_completeness.py
+++ b/tests/functional/test_model_completeness.py
@@ -10,6 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from botocore.session import Session
from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
@@ -33,11 +34,11 @@ def _test_model_is_not_lost(service_name, type_name,
raise AssertionError(
"%s must exist for %s: %s" % (type_name, service_name, e))
-def test_paginators_and_waiters_are_not_lost_in_new_version():
- for service_name in Session().get_available_services():
- versions = Loader().list_api_versions(service_name, 'service-2')
- if len(versions) > 1:
- for type_name in ['paginators-1', 'waiters-2']:
- _test_model_is_not_lost(service_name,
- type_name,
- versions[-2], versions[-1])
+@pytest.mark.parametrize('service_name', Session().get_available_services())
+def test_paginators_and_waiters_are_not_lost_in_new_version(service_name):
+ versions = Loader().list_api_versions(service_name, 'service-2')
+ if len(versions) > 1:
+ for type_name in ['paginators-1', 'waiters-2']:
+ _test_model_is_not_lost(service_name,
+ type_name,
+ versions[-2], versions[-1])
diff --git a/tests/functional/test_modeled_exceptions.py b/tests/functional/test_modeled_exceptions.py
index c4766139..8caee9a8 100644
--- a/tests/functional/test_modeled_exceptions.py
+++ b/tests/functional/test_modeled_exceptions.py
@@ -10,6 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from contextlib import contextmanager
from tests import unittest, BaseSessionTest, ClientHTTPStubber
@@ -55,14 +56,14 @@ class TestModeledExceptions(BaseSessionTest):
exception_cls = ses.exceptions.AlreadyExistsException
with http_stubber as stubber:
stubber.add_response(status=400, headers={}, body=body)
- with self.assertRaises(exception_cls) as assertion_context:
+ with pytest.raises(exception_cls) as assertion_context:
template = {
'TemplateName': 'foobar',
'SubjectPart': 'foo',
'TextPart': 'bar'
}
ses.create_template(Template=template)
- self.assertEqual(assertion_context.exception.response, response)
+ assert assertion_context.value.response == response
def test_rest_xml_service(self):
body = (
@@ -93,9 +94,9 @@ class TestModeledExceptions(BaseSessionTest):
exception_cls = cloudfront.exceptions.NoSuchDistribution
with http_stubber as stubber:
stubber.add_response(status=404, headers={}, body=body)
- with self.assertRaises(exception_cls) as assertion_context:
+ with pytest.raises(exception_cls) as assertion_context:
cloudfront.get_distribution(Id='foobar')
- self.assertEqual(assertion_context.exception.response, response)
+ assert assertion_context.value.response == response
def test_rest_json_service(self):
headers = {
@@ -130,9 +131,9 @@ class TestModeledExceptions(BaseSessionTest):
exception_cls = efs.exceptions.FileSystemAlreadyExists
with http_stubber as stubber:
stubber.add_response(status=409, headers=headers, body=body)
- with self.assertRaises(exception_cls) as assertion_context:
+ with pytest.raises(exception_cls) as assertion_context:
efs.create_file_system()
- self.assertEqual(assertion_context.exception.response, response)
+ assert assertion_context.value.response == response
def test_json_service(self):
headers = {
@@ -164,6 +165,6 @@ class TestModeledExceptions(BaseSessionTest):
exception_cls = kinesis.exceptions.ResourceNotFoundException
with http_stubber as stubber:
stubber.add_response(status=400, headers=headers, body=body)
- with self.assertRaises(exception_cls) as assertion_context:
+ with pytest.raises(exception_cls) as assertion_context:
kinesis.describe_stream(StreamName='foobar')
- self.assertEqual(assertion_context.exception.response, response)
+ assert assertion_context.value.response == response
diff --git a/tests/functional/test_neptune.py b/tests/functional/test_neptune.py
index 5bd5fdbc..a06dd51e 100644
--- a/tests/functional/test_neptune.py
+++ b/tests/functional/test_neptune.py
@@ -21,9 +21,8 @@ class TestNeptunePresignUrlInjection(BaseSessionTest):
self.http_stubber = ClientHTTPStubber(self.client)
def assert_presigned_url_injected_in_request(self, body):
- self.assertIn('PreSignedUrl', body)
- self.assertNotIn('SourceRegion', body)
-
+ assert 'PreSignedUrl' in body
+ assert 'SourceRegion' not in body
def test_create_db_cluster(self):
params = {
'DBClusterIdentifier': 'my-cluster',
diff --git a/tests/functional/test_paginate.py b/tests/functional/test_paginate.py
index 8bf84ca6..c72330a8 100644
--- a/tests/functional/test_paginate.py
+++ b/tests/functional/test_paginate.py
@@ -13,6 +13,7 @@
from __future__ import division
from math import ceil
from datetime import datetime
+import pytest
from tests import random_chars, unittest
from tests import BaseSessionTest
@@ -59,8 +60,8 @@ class TestRDSPagination(BaseSessionTest):
'StartingToken': '0',
'MaxItems': 3
}).build_full_result()
- self.assertEqual(result['LogFileData'], 'foo')
- self.assertIn('NextToken', result)
+ assert result['LogFileData'] == 'foo'
+ assert 'NextToken' in result
except StubAssertionError as e:
self.fail(str(e))
@@ -160,7 +161,7 @@ class TestAutoscalingPagination(BaseSessionTest):
while 'NextToken' in result:
starting_token = result['NextToken']
# We should never get a duplicate pagination token.
- self.assertNotIn(starting_token, pagination_tokens)
+ assert starting_token not in pagination_tokens
pagination_tokens.append(starting_token)
conf['StartingToken'] = starting_token
@@ -168,7 +169,7 @@ class TestAutoscalingPagination(BaseSessionTest):
result = pages.build_full_result()
all_results.extend(result['Activities'])
- self.assertEqual(len(all_results), total_items)
+ assert len(all_results) == total_items
class TestCloudwatchLogsPagination(BaseSessionTest):
@@ -212,24 +213,20 @@ class TestCloudwatchLogsPagination(BaseSessionTest):
logGroupName=group_name,
)
result = pages.build_full_result()
- self.assertEqual(len(result['events']), 1)
+ assert len(result['events']) == 1
-class TestTokenEncoding(unittest.TestCase):
- def test_token_encoding(self):
- cases = [
+class TestTokenEncoding:
+
+ @pytest.mark.parametrize("token_dict", [
{'foo': 'bar'},
{'foo': b'bar'},
{'foo': {'bar': b'baz'}},
{'foo': ['bar', b'baz']},
{'foo': b'\xff'},
{'foo': {'bar': b'baz', 'bin': [b'bam']}},
- ]
-
- for token_dict in cases:
- self.assert_token_encodes_and_decodes(token_dict)
-
- def assert_token_encodes_and_decodes(self, token_dict):
+ ])
+ def test_token_encoding(self, token_dict):
encoded = TokenEncoder().encode(token_dict)
assert isinstance(encoded, six.string_types)
decoded = TokenDecoder().decode(encoded)
diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py
index b2c3cbfa..227519eb 100644
--- a/tests/functional/test_paginator_config.py
+++ b/tests/functional/test_paginator_config.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import string
+import pytest
import jmespath
from jmespath.exceptions import JMESPathError
@@ -130,7 +131,8 @@ KNOWN_EXTRA_OUTPUT_KEYS = [
]
-def test_lint_pagination_configs():
+def _generate_page_configs():
+ page_configs = []
session = botocore.session.get_session()
loader = session.get_component('data_loader')
services = loader.list_available_services('paginators-1')
@@ -139,8 +141,14 @@ def test_lint_pagination_configs():
page_config = loader.load_service_model(service_name,
'paginators-1',
service_model.api_version)
- for op_name, single_config in page_config['pagination'].items():
- _lint_single_paginator(op_name, single_config, service_model)
+ yield (service_model, page_config['pagination'])
+ return page_configs
+
+
+@pytest.mark.parametrize('service_model, pagination', _generate_page_configs())
+def test_lint_pagination_configs(service_model, pagination):
+ for op_name, single_config in pagination.items():
+ _lint_single_paginator(op_name, single_config, service_model)
def _lint_single_paginator(operation_name, page_config,
diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py
index 29de2f16..9219789f 100644
--- a/tests/functional/test_public_apis.py
+++ b/tests/functional/test_public_apis.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from collections import defaultdict
+import pytest
from tests import mock
from tests import ClientHTTPStubber
@@ -61,15 +62,14 @@ def _test_public_apis_will_not_be_signed(client, operation, kwargs):
assert sig_v4_disabled, "SigV4 is incorrectly enabled"
-def test_public_apis_will_not_be_signed():
+@pytest.mark.parametrize('service_name, operations', PUBLIC_API_TESTS.items())
+def test_public_apis_will_not_be_signed(service_name, operations):
session = Session()
# Mimic the scenario that user does not have aws credentials setup
session.get_credentials = mock.Mock(return_value=None)
-
- for service_name in PUBLIC_API_TESTS:
- client = session.create_client(service_name, REGIONS[service_name])
- for operation_name in PUBLIC_API_TESTS[service_name]:
- kwargs = PUBLIC_API_TESTS[service_name][operation_name]
- method = getattr(client, xform_name(operation_name))
- _test_public_apis_will_not_be_signed(client, method, kwargs)
+ client = session.create_client(service_name, REGIONS[service_name])
+ for operation_name in operations:
+ kwargs = operations[operation_name]
+ method = getattr(client, xform_name(operation_name))
+ _test_public_apis_will_not_be_signed(client, method, kwargs)
diff --git a/tests/functional/test_rds.py b/tests/functional/test_rds.py
index 71ba41ce..c841d525 100644
--- a/tests/functional/test_rds.py
+++ b/tests/functional/test_rds.py
@@ -24,8 +24,8 @@ class TestRDSPresignUrlInjection(BaseSessionTest):
self.http_stubber = ClientHTTPStubber(self.client)
def assert_presigned_url_injected_in_request(self, body):
- self.assertIn('PreSignedUrl', body)
- self.assertNotIn('SourceRegion', body)
+ assert 'PreSignedUrl' in body
+ assert 'SourceRegion' not in body
def test_copy_snapshot(self):
params = {
@@ -95,8 +95,8 @@ class TestRDS(unittest.TestCase):
DBHostname=hostname, Port=port, DBUsername=username)
endpoint_url = 'host.us-east-1.rds.amazonaws.com:3306'
- self.assertIn(endpoint_url, auth_token)
- self.assertIn('Action=connect', auth_token)
+ assert endpoint_url in auth_token
+ assert 'Action=connect' in auth_token
# Asserts that there is no scheme in the url
- self.assertTrue(auth_token.startswith(hostname))
+ assert auth_token.startswith(hostname)
diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py
index e117f356..6ac1335a 100644
--- a/tests/functional/test_regions.py
+++ b/tests/functional/test_regions.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import create_session, unittest
+import pytest
from tests import mock
from botocore.client import ClientEndpointBridge
@@ -446,60 +447,43 @@ def _get_patched_session():
session = create_session()
return session
+def _test_single_service_region(service_name, region_name,
+ expected_endpoint, resolver):
+ bridge = ClientEndpointBridge(resolver, None, None)
+ result = bridge.resolve(service_name, region_name)
+ expected = 'https://%s' % expected_endpoint
+ assert result['endpoint_url'] == expected
-class TestRegions(unittest.TestCase):
- def test_known_endpoints(self):
- # Verify the actual values from the partition files. While
- # TestEndpointHeuristics verified the generic functionality given
- # any endpoints file, this test actually verifies the partition
- # data against a fixed list of known endpoints. This list doesn't
- # need to be kept 100% up to date, but serves as a basis for
- # regressions as the endpoint data logic evolves.
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- for region_name, service_dict in KNOWN_REGIONS.items():
- for service_name, endpoint in service_dict.items():
- self._test_single_service_region(service_name,
- region_name, endpoint,
- resolver)
+# Ensure that all S3 regions use s3v4 instead of v4
+def test_all_s3_endpoints_have_s3v4():
+ session = _get_patched_session()
+ partitions = session.get_available_partitions()
+ resolver = session._get_internal_component('endpoint_resolver')
+ for partition_name in partitions:
+ for endpoint in session.get_available_regions('s3', partition_name):
+ resolved = resolver.construct_endpoint('s3', endpoint)
+ assert 's3v4' in resolved['signatureVersions']
+ assert 'v4' not in resolved['signatureVersions']
- def _test_single_service_region(self, service_name, region_name,
- expected_endpoint, resolver):
- bridge = ClientEndpointBridge(resolver, None, None)
- result = bridge.resolve(service_name, region_name)
- expected = 'https://%s' % expected_endpoint
- self.assertEqual(result['endpoint_url'], expected)
+def _test_single_service_partition_endpoint(service_name,
+ expected_endpoint,
+ resolver):
+ bridge = ClientEndpointBridge(resolver)
+ result = bridge.resolve(service_name)
+ assert result['endpoint_url'] == expected_endpoint
- # Ensure that all S3 regions use s3v4 instead of v4
- def test_all_s3_endpoints_have_s3v4(self):
- session = _get_patched_session()
- partitions = session.get_available_partitions()
- resolver = session._get_internal_component('endpoint_resolver')
- for partition_name in partitions:
- for endpoint in session.get_available_regions('s3', partition_name):
- resolved = resolver.construct_endpoint('s3', endpoint)
- assert 's3v4' in resolved['signatureVersions']
- assert 'v4' not in resolved['signatureVersions']
+def test_known_endpoints_other():
+ resolver = _get_patched_session()._get_internal_component(
+ 'endpoint_resolver')
+ for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
+ _test_single_service_partition_endpoint(service_name,
+ endpoint, resolver)
- def _test_single_service_partition_endpoint(self, service_name,
- expected_endpoint,
- resolver):
- bridge = ClientEndpointBridge(resolver)
- result = bridge.resolve(service_name)
- assert result['endpoint_url'] == expected_endpoint
-
- def test_known_endpoints_other(self):
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
- self._test_single_service_partition_endpoint(service_name,
- endpoint, resolver)
-
- def test_non_partition_endpoint_requires_region(self):
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- with self.assertRaises(NoRegionError):
- resolver.construct_endpoint('ec2')
+def test_non_partition_endpoint_requires_region():
+ resolver = _get_patched_session()._get_internal_component(
+ 'endpoint_resolver')
+ with pytest.raises(NoRegionError):
+ resolver.construct_endpoint('ec2')
class TestEndpointResolution(BaseSessionTest):
@@ -523,10 +507,7 @@ class TestEndpointResolution(BaseSessionTest):
client, stubber = self.create_stubbed_client('s3', 'us-east-2')
stubber.add_response()
client.list_buckets()
- self.assertEqual(
- stubber.requests[0].url,
- 'https://s3.us-east-2.amazonaws.com/'
- )
+ assert stubber.requests[0].url == 'https://s3.us-east-2.amazonaws.com/'
def test_regionalized_client_with_unknown_region(self):
client, stubber = self.create_stubbed_client('s3', 'not-real')
@@ -534,23 +515,16 @@ class TestEndpointResolution(BaseSessionTest):
client.list_buckets()
# Validate we don't fall back to partition endpoint for
# regionalized services.
- self.assertEqual(
- stubber.requests[0].url,
- 'https://s3.not-real.amazonaws.com/'
- )
+ assert stubber.requests[0].url == 'https://s3.not-real.amazonaws.com/'
def test_unregionalized_client_endpoint_resolution(self):
client, stubber = self.create_stubbed_client('iam', 'us-west-2')
stubber.add_response(body=self.xml_response)
client.list_roles()
- self.assertTrue(
- stubber.requests[0].url.startswith('https://iam.amazonaws.com/')
- )
+ assert stubber.requests[0].url.startswith('https://iam.amazonaws.com/')
def test_unregionalized_client_with_unknown_region(self):
client, stubber = self.create_stubbed_client('iam', 'not-real')
stubber.add_response(body=self.xml_response)
client.list_roles()
- self.assertTrue(
- stubber.requests[0].url.startswith('https://iam.amazonaws.com/')
- )
+ assert stubber.requests[0].url.startswith('https://iam.amazonaws.com/')
diff --git a/tests/functional/test_response_shadowing.py b/tests/functional/test_response_shadowing.py
index bd53fd91..384bacc2 100644
--- a/tests/functional/test_response_shadowing.py
+++ b/tests/functional/test_response_shadowing.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
+import pytest
def _all_services():
@@ -20,6 +21,12 @@ def _all_services():
yield session.get_service_model(service_name)
+def _all_error_shapes():
+ for service_model in _all_services():
+ for shape in service_model.error_shapes:
+ yield shape
+
+
def _all_operations():
for service_model in _all_services():
for operation_name in service_model.operation_names:
@@ -35,14 +42,14 @@ def _assert_not_shadowed(key, shape):
assert key not in shape.members, msg % (shape.name, key)
-def test_response_metadata_is_not_shadowed():
- for operation_model in _all_operations():
- shape = operation_model.output_shape
- _assert_not_shadowed('ResponseMetadata', shape)
+@pytest.mark.parametrize('operation_model', _all_operations())
+def test_response_metadata_is_not_shadowed(operation_model):
+ shape = operation_model.output_shape
+ _assert_not_shadowed('ResponseMetadata', shape)
-def test_exceptions_do_not_shadow():
- for service_model in _all_services():
- for shape in service_model.error_shapes:
- _assert_not_shadowed('ResponseMetadata', shape)
- _assert_not_shadowed('Error', shape)
+
+@pytest.mark.parametrize('shape', _all_error_shapes())
+def test_exceptions_do_not_shadow(shape):
+ _assert_not_shadowed('ResponseMetadata', shape)
+ _assert_not_shadowed('Error', shape)
diff --git a/tests/functional/test_retry.py b/tests/functional/test_retry.py
index cb2e7d28..5c9c7391 100644
--- a/tests/functional/test_retry.py
+++ b/tests/functional/test_retry.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import contextlib
import json
+import pytest
from tests import BaseSessionTest, mock, ClientHTTPStubber
from botocore.exceptions import ClientError
@@ -39,10 +40,9 @@ class BaseRetryTest(BaseSessionTest):
with ClientHTTPStubber(client) as http_stubber:
for _ in range(num_responses):
http_stubber.add_response(status=status, body=body)
- with six.assertRaisesRegex(self,
- ClientError, 'reached max retries: %s' % num_retries):
+ with pytest.raises(ClientError, match='reached max retries: %s' % num_retries):
yield
- self.assertEqual(len(http_stubber.requests), num_responses)
+ assert len(http_stubber.requests) == num_responses
class TestLegacyRetry(BaseRetryTest):
@@ -183,8 +183,6 @@ class TestRetriesV2(BaseRetryTest):
# verify by looking at the request metadata.
with ClientHTTPStubber(client) as http_stubber:
http_stubber.add_response(status=502, body=b'{}')
- with self.assertRaises(ClientError) as e:
+ with pytest.raises(ClientError) as e:
client.list_tables()
- self.assertTrue(
- e.exception.response['ResponseMetadata'].get('RetryQuotaReached')
- )
+ assert e.value.response['ResponseMetadata'].get('RetryQuotaReached')
\ No newline at end of file
diff --git a/tests/functional/test_route53.py b/tests/functional/test_route53.py
index 2dcd63a7..4ebd7586 100644
--- a/tests/functional/test_route53.py
+++ b/tests/functional/test_route53.py
@@ -38,7 +38,7 @@ class TestRoute53Pagination(unittest.TestCase):
with self.stubber:
config={'PageSize': 1}
results = list(paginator.paginate(PaginationConfig=config))
- self.assertTrue(len(results) >= 0)
+ assert len(results) >= 0
def test_paginate_with_max_items_str(self):
# Route53 has a string type for MaxItems. We need to ensure that this
@@ -48,7 +48,7 @@ class TestRoute53Pagination(unittest.TestCase):
with self.stubber:
config={'PageSize': '1'}
results = list(paginator.paginate(PaginationConfig=config))
- self.assertTrue(len(results) >= 0)
+ assert len(results) >= 0
class TestRoute53EndpointResolution(BaseSessionTest):
@@ -63,10 +63,10 @@ class TestRoute53EndpointResolution(BaseSessionTest):
client, stubber = self.create_stubbed_client('route53', 'us-west-2')
client.list_geo_locations()
expected_url = 'https://route53.amazonaws.com/'
- self.assertTrue(stubber.requests[0].url.startswith(expected_url))
+ assert stubber.requests[0].url.startswith(expected_url)
def test_unregionalized_client_with_unknown_region(self):
client, stubber = self.create_stubbed_client('route53', 'not-real')
client.list_geo_locations()
expected_url = 'https://route53.amazonaws.com/'
- self.assertTrue(stubber.requests[0].url.startswith(expected_url))
+ assert stubber.requests[0].url.startswith(expected_url)
diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py
index 34b256d4..19adad48 100644
--- a/tests/functional/test_s3.py
+++ b/tests/functional/test_s3.py
@@ -11,7 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
-
+import pytest
from tests import temporary_file
from tests import (unittest, mock, BaseSessionTest, create_session,
ClientHTTPStubber)
@@ -29,7 +29,7 @@ class TestS3BucketValidation(unittest.TestCase):
def test_invalid_bucket_name_raises_error(self):
session = botocore.session.get_session()
s3 = session.create_client('s3')
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
s3.put_object(Bucket='adfgasdfadfs/bucket/name',
Key='foo', Body=b'asdf')
@@ -64,7 +64,7 @@ class BaseS3ClientConfigurationTest(BaseSessionTest):
class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
def test_no_s3_config(self):
client = self.create_s3_client()
- self.assertIsNone(client.meta.config.s3)
+ assert client.meta.config.s3 is None
def test_client_s3_dualstack_handles_uppercase_true(self):
with temporary_file('w') as f:
@@ -75,8 +75,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_dualstack_endpoint = True'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['use_dualstack_endpoint'], True)
+ assert client.meta.config.s3['use_dualstack_endpoint'] is True
def test_client_s3_dualstack_handles_lowercase_true(self):
with temporary_file('w') as f:
@@ -87,8 +86,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_dualstack_endpoint = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['use_dualstack_endpoint'], True)
+ assert client.meta.config.s3['use_dualstack_endpoint'] is True
def test_client_s3_accelerate_handles_uppercase_true(self):
with temporary_file('w') as f:
@@ -99,8 +97,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_accelerate_endpoint = True'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['use_accelerate_endpoint'], True)
+ assert client.meta.config.s3['use_accelerate_endpoint'] is True
def test_client_s3_accelerate_handles_lowercase_true(self):
with temporary_file('w') as f:
@@ -111,8 +108,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_accelerate_endpoint = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['use_accelerate_endpoint'], True)
+ assert client.meta.config.s3['use_accelerate_endpoint'] is True
def test_client_payload_signing_enabled_handles_uppercase_true(self):
with temporary_file('w') as f:
@@ -123,8 +119,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' payload_signing_enabled = True'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['payload_signing_enabled'], True)
+ assert client.meta.config.s3['payload_signing_enabled'] is True
def test_client_payload_signing_enabled_handles_lowercase_true(self):
with temporary_file('w') as f:
@@ -135,8 +130,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' payload_signing_enabled = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['payload_signing_enabled'], True)
+ assert client.meta.config.s3['payload_signing_enabled'] is True
def test_includes_unmodeled_s3_config_vars(self):
with temporary_file('w') as f:
@@ -147,8 +141,7 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' unmodeled = unmodeled_val'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3['unmodeled'], 'unmodeled_val')
+ assert client.meta.config.s3['unmodeled'] == 'unmodeled_val'
def test_mixed_modeled_and_unmodeled_config_vars(self):
with temporary_file('w') as f:
@@ -160,23 +153,17 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' unmodeled = unmodeled_val'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'payload_signing_enabled': True,
'unmodeled': 'unmodeled_val'
}
- )
def test_use_arn_region(self):
self.environ['AWS_S3_USE_ARN_REGION'] = 'true'
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': True,
}
- )
def test_use_arn_region_config_var(self):
with temporary_file('w') as f:
@@ -186,12 +173,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
's3_use_arn_region = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': True,
}
- )
def test_use_arn_region_nested_config_var(self):
with temporary_file('w') as f:
@@ -202,22 +186,16 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_arn_region = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': True,
}
- )
def test_use_arn_region_is_case_insensitive(self):
self.environ['AWS_S3_USE_ARN_REGION'] = 'True'
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': True,
}
- )
def test_use_arn_region_env_var_overrides_config_var(self):
self.environ['AWS_S3_USE_ARN_REGION'] = 'false'
@@ -229,12 +207,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' use_arn_region = true'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': False,
}
- )
def test_client_config_use_arn_region_overrides_env_var(self):
self.environ['AWS_S3_USE_ARN_REGION'] = 'true'
@@ -243,12 +218,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
s3={'use_arn_region': False}
)
)
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': False,
}
- )
def test_client_config_use_arn_region_overrides_config_var(self):
with temporary_file('w') as f:
@@ -263,33 +235,23 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
s3={'use_arn_region': False}
)
)
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': False,
}
- )
def test_use_arn_region_is_case_insensitive(self):
self.environ['AWS_S3_USE_ARN_REGION'] = 'True'
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'use_arn_region': True,
}
- )
-
def test_us_east_1_regional_env_var(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'regional',
}
- )
def test_us_east_1_regional_config_var(self):
with temporary_file('w') as f:
@@ -299,12 +261,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
's3_us_east_1_regional_endpoint = regional'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'regional',
}
- )
def test_us_east_1_regional_nested_config_var(self):
with temporary_file('w') as f:
@@ -315,12 +274,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' us_east_1_regional_endpoint = regional'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'regional',
}
- )
def test_us_east_1_regional_env_var_overrides_config_var(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
@@ -332,12 +288,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
' us_east_1_regional_endpoint = legacy'
)
client = self.create_s3_client()
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'regional',
}
- )
def test_client_config_us_east_1_regional_overrides_env_var(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
@@ -346,12 +299,9 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
s3={'us_east_1_regional_endpoint': 'legacy'}
)
)
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'legacy',
}
- )
def test_client_config_us_east_1_regional_overrides_config_var(self):
with temporary_file('w') as f:
@@ -366,15 +316,12 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
s3={'us_east_1_regional_endpoint': 'regional'}
)
)
- self.assertEqual(
- client.meta.config.s3,
- {
+ assert client.meta.config.s3 == {
'us_east_1_regional_endpoint': 'regional',
}
- )
def test_client_validates_us_east_1_regional(self):
- with self.assertRaises(InvalidS3UsEast1RegionalEndpointConfigError):
+ with pytest.raises(InvalidS3UsEast1RegionalEndpointConfigError):
self.create_s3_client(
config=Config(
s3={'us_east_1_regional_endpoint': 'not-valid'}
@@ -383,30 +330,30 @@ class TestS3ClientConfigResolution(BaseS3ClientConfigurationTest):
def test_client_region_defaults_to_us_east_1(self):
client = self.create_s3_client(region_name=None)
- self.assertEqual(client.meta.region_name, 'us-east-1')
+ assert client.meta.region_name == 'us-east-1'
def test_client_region_remains_us_east_1(self):
client = self.create_s3_client(region_name='us-east-1')
- self.assertEqual(client.meta.region_name, 'us-east-1')
+ assert client.meta.region_name == 'us-east-1'
def test_client_region_remains_aws_global(self):
client = self.create_s3_client(region_name='aws-global')
- self.assertEqual(client.meta.region_name, 'aws-global')
+ assert client.meta.region_name == 'aws-global'
def test_client_region_defaults_to_aws_global_for_regional(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
client = self.create_s3_client(region_name=None)
- self.assertEqual(client.meta.region_name, 'aws-global')
+ assert client.meta.region_name == 'aws-global'
def test_client_region_remains_us_east_1_for_regional(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
client = self.create_s3_client(region_name='us-east-1')
- self.assertEqual(client.meta.region_name, 'us-east-1')
+ assert client.meta.region_name == 'us-east-1'
def test_client_region_remains_aws_global_for_regional(self):
self.environ['AWS_S3_US_EAST_1_REGIONAL_ENDPOINT'] = 'regional'
client = self.create_s3_client(region_name='aws-global')
- self.assertEqual(client.meta.region_name, 'aws-global')
+ assert client.meta.region_name == 'aws-global'
class TestS3Copy(BaseS3OperationTest):
@@ -447,9 +394,9 @@ class TestS3Copy(BaseS3OperationTest):
)
# Validate we retried and got second body
- self.assertEqual(len(self.http_stubber.requests), 2)
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
- self.assertTrue('CopyObjectResult' in response)
+ assert len(self.http_stubber.requests) == 2
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ assert 'CopyObjectResult' in response
def test_s3_copy_object_with_incomplete_response(self):
self.client, self.http_stubber = self.create_stubbed_s3_client(
@@ -458,7 +405,7 @@ class TestS3Copy(BaseS3OperationTest):
incomplete_body = b'<?xml version="1.0" encoding="UTF-8"?>\n\n\n'
self.http_stubber.add_response(status=200, body=incomplete_body)
- with self.assertRaises(ResponseParserError):
+ with pytest.raises(ResponseParserError):
self.client.copy_object(
Bucket='bucket',
CopySource='other-bucket/test.txt',
@@ -488,7 +435,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
auth_header = request.headers['Authorization'].decode('utf-8')
actual_region = self._V4_AUTH_REGEX.match(
auth_header).group('signing_region')
- self.assertEqual(expected_region, actual_region)
+ assert expected_region == actual_region
def assert_signing_name(self, request, expected_name):
auth_header = request.headers['Authorization'].decode('utf-8')
@@ -498,14 +445,13 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
def assert_signing_region_in_url(self, url, expected_region):
qs_components = parse_qs(urlsplit(url).query)
- self.assertIn(expected_region, qs_components['X-Amz-Credential'][0])
+ assert expected_region in qs_components['X-Amz-Credential'][0]
def assert_expected_copy_source_header(self,
http_stubber, expected_copy_source):
request = self.http_stubber.requests[0]
- self.assertIn('x-amz-copy-source', request.headers)
- self.assertEqual(
- request.headers['x-amz-copy-source'], expected_copy_source)
+ assert 'x-amz-copy-source' in request.headers
+ assert request.headers['x-amz-copy-source'] == expected_copy_source
def add_copy_object_response(self, http_stubber):
http_stubber.add_response(
@@ -520,42 +466,42 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
accesspoint_arn = (
'arn:aws:s3::123456789012:accesspoint:myendpoint'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_missing_account_id_in_arn(self):
accesspoint_arn = (
'arn:aws:s3:us-west-2::accesspoint:myendpoint'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_missing_accesspoint_name_in_arn(self):
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_accesspoint_includes_asterisk(self):
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:*'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_accesspoint_includes_dot(self):
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:my.endpoint'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_accesspoint_arn_contains_subresources(self):
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint:object'
)
- with self.assertRaises(botocore.exceptions.ParamValidationError):
+ with pytest.raises(botocore.exceptions.ParamValidationError):
self.client.list_objects(Bucket=accesspoint_arn)
def test_accesspoint_arn_with_custom_endpoint(self):
@@ -564,7 +510,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
)
self.client, _ = self.create_stubbed_s3_client(
endpoint_url='https://custom.com')
- with self.assertRaises(
+ with pytest.raises(
botocore.exceptions.
UnsupportedS3AccesspointConfigurationError):
self.client.list_objects(Bucket=accesspoint_arn)
@@ -575,7 +521,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
)
self.client, _ = self.create_stubbed_s3_client(
config=Config(s3={'use_accelerate_endpoint': True}))
- with self.assertRaises(
+ with pytest.raises(
botocore.exceptions.
UnsupportedS3AccesspointConfigurationError):
self.client.list_objects(Bucket=accesspoint_arn)
@@ -586,7 +532,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
)
self.client, _ = self.create_stubbed_s3_client(
region_name='cn-north-1')
- with self.assertRaises(
+ with pytest.raises(
botocore.exceptions.
UnsupportedS3AccesspointConfigurationError):
self.client.list_objects(Bucket=accesspoint_arn)
@@ -599,7 +545,7 @@ class TestAccesspointArn(BaseS3ClientConfigurationTest):
region_name='cn-north-1',
config=Config(s3={'use_accelerate_endpoint': True})
)
- with self.assertRaises(
+ with pytest.raises(
botocore.exceptions.
UnsupportedS3AccesspointConfigurationError):
self.client.list_objects(Bucket=accesspoint_arn)
@@ -812,7 +758,7 @@ class TestOnlyAsciiCharsAllowed(BaseS3OperationTest):
def test_validates_non_ascii_chars_trigger_validation_error(self):
self.http_stubber.add_response()
with self.http_stubber:
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.client.put_object(
Bucket='foo', Key='bar', Metadata={
'goodkey': 'good', 'non-ascii': u'\u2713'})
@@ -858,14 +804,10 @@ class TestS3GetBucketLifecycle(BaseS3OperationTest):
response = s3.get_bucket_lifecycle(Bucket='mybucket')
# Each Transition member should have at least one of the
# transitions provided.
- self.assertEqual(
- response['Rules'][0]['Transition'],
- {'Days': 40, 'StorageClass': 'STANDARD_IA'}
- )
- self.assertEqual(
- response['Rules'][1]['NoncurrentVersionTransition'],
- {'NoncurrentDays': 40, 'StorageClass': 'STANDARD_IA'}
- )
+ assert response['Rules'][0]['Transition'] == {
+ 'Days': 40, 'StorageClass': 'STANDARD_IA'}
+ assert response['Rules'][1]['NoncurrentVersionTransition'] == {
+ 'NoncurrentDays': 40, 'StorageClass': 'STANDARD_IA'}
class TestS3PutObject(BaseS3OperationTest):
@@ -899,8 +841,8 @@ class TestS3PutObject(BaseS3OperationTest):
response = s3.put_object(Bucket='mybucket', Key='mykey', Body=b'foo')
# The first response should have been retried even though the xml is
# invalid and eventually return the 200 response.
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
- self.assertEqual(len(http_stubber.requests), 2)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ assert len(http_stubber.requests) == 2
class TestS3SigV4(BaseS3OperationTest):
@@ -917,7 +859,7 @@ class TestS3SigV4(BaseS3OperationTest):
def test_content_md5_set(self):
with self.http_stubber:
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
- self.assertIn('content-md5', self.get_sent_headers())
+ assert 'content-md5' in self.get_sent_headers()
def test_content_md5_set_empty_body(self):
with self.http_stubber:
@@ -943,7 +885,7 @@ class TestS3SigV4(BaseS3OperationTest):
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
sha_header = sent_headers.get('x-amz-content-sha256')
- self.assertNotEqual(sha_header, b'UNSIGNED-PAYLOAD')
+ assert sha_header != b'UNSIGNED-PAYLOAD'
def test_content_sha256_not_set_if_config_value_is_false(self):
config = Config(signature_version='s3v4', s3={
@@ -957,7 +899,7 @@ class TestS3SigV4(BaseS3OperationTest):
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
sha_header = sent_headers.get('x-amz-content-sha256')
- self.assertEqual(sha_header, b'UNSIGNED-PAYLOAD')
+ assert sha_header == b'UNSIGNED-PAYLOAD'
def test_content_sha256_set_if_md5_is_unavailable(self):
with mock.patch('botocore.auth.MD5_AVAILABLE', False):
@@ -966,8 +908,8 @@ class TestS3SigV4(BaseS3OperationTest):
self.client.put_object(Bucket='foo', Key='bar', Body='baz')
sent_headers = self.get_sent_headers()
unsigned = 'UNSIGNED-PAYLOAD'
- self.assertNotEqual(sent_headers['x-amz-content-sha256'], unsigned)
- self.assertNotIn('content-md5', sent_headers)
+ assert sent_headers['x-amz-content-sha256'] != unsigned
+ assert 'content-md5' not in sent_headers
@@ -984,7 +926,7 @@ class TestCanSendIntegerHeaders(BaseSessionTest):
# Verify that the request integer value of 3 has been converted to
# string '3'. This also means we've made it pass the signer which
# expects string values in order to sign properly.
- self.assertEqual(headers['Content-Length'], b'3')
+ assert headers['Content-Length'] == b'3'
class TestRegionRedirect(BaseS3OperationTest):
@@ -1049,16 +991,16 @@ class TestRegionRedirect(BaseS3OperationTest):
self.http_stubber.add_response(**self.success_response)
with self.http_stubber:
response = self.client.list_objects(Bucket='foo')
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
- self.assertEqual(len(self.http_stubber.requests), 2)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
+ assert len(self.http_stubber.requests) == 2
initial_url = ('https://s3.us-west-2.amazonaws.com/foo'
'?encoding-type=url')
- self.assertEqual(self.http_stubber.requests[0].url, initial_url)
+ assert self.http_stubber.requests[0].url == initial_url
fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo'
'?encoding-type=url')
- self.assertEqual(self.http_stubber.requests[1].url, fixed_url)
+ assert self.http_stubber.requests[1].url == fixed_url
def test_region_redirect_cache(self):
self.http_stubber.add_response(**self.redirect_response)
@@ -1069,20 +1011,18 @@ class TestRegionRedirect(BaseS3OperationTest):
first_response = self.client.list_objects(Bucket='foo')
second_response = self.client.list_objects(Bucket='foo')
- self.assertEqual(
- first_response['ResponseMetadata']['HTTPStatusCode'], 200)
- self.assertEqual(
- second_response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert first_response['ResponseMetadata']['HTTPStatusCode'] == 200
+ assert second_response['ResponseMetadata']['HTTPStatusCode'] == 200
- self.assertEqual(len(self.http_stubber.requests), 3)
+ assert len(self.http_stubber.requests) == 3
initial_url = ('https://s3.us-west-2.amazonaws.com/foo'
'?encoding-type=url')
- self.assertEqual(self.http_stubber.requests[0].url, initial_url)
+ assert self.http_stubber.requests[0].url == initial_url
fixed_url = ('https://s3.eu-central-1.amazonaws.com/foo'
'?encoding-type=url')
- self.assertEqual(self.http_stubber.requests[1].url, fixed_url)
- self.assertEqual(self.http_stubber.requests[2].url, fixed_url)
+ assert self.http_stubber.requests[1].url == fixed_url
+ assert self.http_stubber.requests[2].url == fixed_url
def test_resign_request_with_region_when_needed(self):
@@ -1093,17 +1033,16 @@ class TestRegionRedirect(BaseS3OperationTest):
http_stubber.add_response(**self.bad_signing_region_response)
http_stubber.add_response(**self.success_response)
first_response = client.list_objects(Bucket='foo')
- self.assertEqual(
- first_response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert first_response['ResponseMetadata']['HTTPStatusCode'] == 200
- self.assertEqual(len(http_stubber.requests), 2)
+ assert len(http_stubber.requests) == 2
initial_url = ('https://foo.s3.us-west-2.amazonaws.com/'
'?encoding-type=url')
- self.assertEqual(http_stubber.requests[0].url, initial_url)
+ assert http_stubber.requests[0].url == initial_url
fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/'
'?encoding-type=url')
- self.assertEqual(http_stubber.requests[1].url, fixed_url)
+ assert http_stubber.requests[1].url == fixed_url
def test_resign_request_in_us_east_1(self):
region_headers = {'x-amz-bucket-region': 'eu-central-1'}
@@ -1116,14 +1055,14 @@ class TestRegionRedirect(BaseS3OperationTest):
http_stubber.add_response(headers=region_headers)
http_stubber.add_response()
response = client.head_object(Bucket='foo', Key='bar')
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
- self.assertEqual(len(http_stubber.requests), 4)
+ assert len(http_stubber.requests) == 4
initial_url = ('https://foo.s3.amazonaws.com/bar')
- self.assertEqual(http_stubber.requests[0].url, initial_url)
+ assert http_stubber.requests[0].url == initial_url
fixed_url = ('https://foo.s3.eu-central-1.amazonaws.com/bar')
- self.assertEqual(http_stubber.requests[-1].url, fixed_url)
+ assert http_stubber.requests[-1].url == fixed_url
def test_resign_request_in_us_east_1_fails(self):
region_headers = {'x-amz-bucket-region': 'eu-central-1'}
@@ -1137,9 +1076,9 @@ class TestRegionRedirect(BaseS3OperationTest):
http_stubber.add_response(headers=region_headers)
# The final request still fails with a 400.
http_stubber.add_response(status=400)
- with self.assertRaises(ClientError) as e:
+ with pytest.raises(ClientError):
client.head_object(Bucket='foo', Key='bar')
- self.assertEqual(len(http_stubber.requests), 4)
+ assert len(http_stubber.requests) == 4
def test_no_region_redirect_for_accesspoint(self):
self.http_stubber.add_response(**self.redirect_response)
@@ -1150,8 +1089,7 @@ class TestRegionRedirect(BaseS3OperationTest):
try:
self.client.list_objects(Bucket=accesspoint_arn)
except self.client.exceptions.ClientError as e:
- self.assertEqual(
- e.response['Error']['Code'], 'PermanentRedirect')
+ assert e.response['Error']['Code'] == 'PermanentRedirect'
else:
self.fail('PermanentRedirect error should have been raised')
@@ -1162,9 +1100,9 @@ class TestGeneratePresigned(BaseS3OperationTest):
# Assert that it looks like a v2 presigned url by asserting it does
# not have a couple of the v4 qs components and assert that it has the
# v2 Signature component.
- self.assertNotIn('X-Amz-Credential', qs_components)
- self.assertNotIn('X-Amz-Algorithm', qs_components)
- self.assertIn('Signature', qs_components)
+ assert 'X-Amz-Credential' not in qs_components
+ assert 'X-Amz-Algorithm' not in qs_components
+ assert 'Signature' in qs_components
def test_generate_unauthed_url(self):
config = Config(signature_version=botocore.UNSIGNED)
@@ -1175,7 +1113,7 @@ class TestGeneratePresigned(BaseS3OperationTest):
'Bucket': 'foo',
'Key': 'bar'
})
- self.assertEqual(url, 'https://foo.s3.amazonaws.com/bar')
+ assert url == 'https://foo.s3.amazonaws.com/bar'
def test_generate_unauthed_post(self):
config = Config(signature_version=botocore.UNSIGNED)
@@ -1185,40 +1123,39 @@ class TestGeneratePresigned(BaseS3OperationTest):
'fields': {'key': 'bar'},
'url': 'https://foo.s3.amazonaws.com/'
}
- self.assertEqual(parts, expected)
+ assert parts == expected
def test_default_presign_uses_sigv2(self):
url = self.client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url)
+ assert 'Algorithm=AWS4-HMAC-SHA256' not in url
def test_sigv4_presign(self):
config = Config(signature_version='s3v4')
client = self.session.create_client('s3', self.region, config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
+ assert 'Algorithm=AWS4-HMAC-SHA256' in url
def test_sigv2_presign(self):
config = Config(signature_version='s3')
client = self.session.create_client('s3', self.region, config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertNotIn('Algorithm=AWS4-HMAC-SHA256', url)
+ assert 'Algorithm=AWS4-HMAC-SHA256' not in url
def test_uses_sigv4_for_unknown_region(self):
client = self.session.create_client('s3', 'us-west-88')
url = client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
+ assert 'Algorithm=AWS4-HMAC-SHA256' in url
def test_default_presign_sigv4_in_sigv4_only_region(self):
client = self.session.create_client('s3', 'us-east-2')
url = client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertIn('Algorithm=AWS4-HMAC-SHA256', url)
+ assert 'Algorithm=AWS4-HMAC-SHA256' in url
def test_presign_unsigned(self):
config = Config(signature_version=botocore.UNSIGNED)
client = self.session.create_client('s3', 'us-east-2', config=config)
url = client.generate_presigned_url(ClientMethod='list_buckets')
- self.assertEqual(
- 'https://s3.us-east-2.amazonaws.com/', url)
+ assert url == 'https://s3.us-east-2.amazonaws.com/'
def test_presign_url_with_ssec(self):
config = Config(signature_version='s3')
@@ -1234,9 +1171,7 @@ class TestGeneratePresigned(BaseS3OperationTest):
)
# The md5 of the sse-c key will be injected when parameters are
# built so it should show up in the presigned url as well.
- self.assertIn(
- 'x-amz-server-side-encryption-customer-key-md5=', url
- )
+ assert 'x-amz-server-side-encryption-customer-key-md5=' in url
def test_presign_s3_accelerate(self):
config = Config(signature_version=botocore.UNSIGNED,
@@ -1247,8 +1182,7 @@ class TestGeneratePresigned(BaseS3OperationTest):
Params={'Bucket': 'mybucket', 'Key': 'mykey'}
)
# The url should be the accelerate endpoint
- self.assertEqual(
- 'https://mybucket.s3-accelerate.amazonaws.com/mykey', url)
+ assert url == 'https://mybucket.s3-accelerate.amazonaws.com/mykey'
def test_presign_post_s3_accelerate(self):
config = Config(signature_version=botocore.UNSIGNED,
@@ -1261,7 +1195,7 @@ class TestGeneratePresigned(BaseS3OperationTest):
'fields': {'key': 'mykey'},
'url': 'https://mybucket.s3-accelerate.amazonaws.com/'
}
- self.assertEqual(parts, expected)
+ assert parts == expected
def test_presign_uses_v2_for_aws_global(self):
client = self.session.create_client('s3', 'aws-global')
diff --git a/tests/functional/test_s3_control.py b/tests/functional/test_s3_control.py
index afb7e171..01b5b2ce 100644
--- a/tests/functional/test_s3_control.py
+++ b/tests/functional/test_s3_control.py
@@ -37,18 +37,17 @@ class S3ControlOperationTest(BaseSessionTest):
def test_does_add_account_id_to_host(self):
self.client.get_public_access_block(AccountId='123')
- self.assertEqual(self.http_session_send_mock.call_count, 1)
+ assert self.http_session_send_mock.call_count == 1
request = self.http_session_send_mock.call_args_list[0][0][0]
- self.assertTrue(request.url.startswith(
- 'https://123.s3-control.us-west-2.amazonaws.com'))
+ assert request.url.startswith('https://123.s3-control.us-west-2.amazonaws.com')
def test_does_not_remove_account_id_from_headers(self):
self.client.get_public_access_block(AccountId='123')
- self.assertEqual(self.http_session_send_mock.call_count, 1)
+ assert self.http_session_send_mock.call_count == 1
request = self.http_session_send_mock.call_args_list[0][0][0]
- self.assertIn('x-amz-account-id', request.headers)
+ assert 'x-amz-account-id' in request.headers
def test_does_support_dualstack_endpoint(self):
# Re-create the client with the use_dualstack_endpoint configuration
@@ -60,7 +59,6 @@ class S3ControlOperationTest(BaseSessionTest):
)
self.client.get_public_access_block(AccountId='123')
- self.assertEqual(self.http_session_send_mock.call_count, 1)
+ assert self.http_session_send_mock.call_count == 1
request = self.http_session_send_mock.call_args_list[0][0][0]
- self.assertTrue(request.url.startswith(
- 'https://123.s3-control.dualstack.us-west-2.amazonaws.com'))
+ assert request.url.startswith('https://123.s3-control.dualstack.us-west-2.amazonaws.com')
diff --git a/tests/functional/test_sagemaker.py b/tests/functional/test_sagemaker.py
index 5c914c0b..037aab80 100644
--- a/tests/functional/test_sagemaker.py
+++ b/tests/functional/test_sagemaker.py
@@ -25,9 +25,9 @@ class TestSagemaker(BaseSessionTest):
)
self.stubber.add_response('list_endpoints', {'Endpoints': []})
self.client.list_endpoints()
- self.assertEqual(self.hook_calls, [
+ assert self.hook_calls == [
'provide-client-params.sagemaker.ListEndpoints'
- ])
+ ]
def test_event_with_new_prefix(self):
self.client.meta.events.register(
@@ -36,6 +36,6 @@ class TestSagemaker(BaseSessionTest):
)
self.stubber.add_response('list_endpoints', {'Endpoints': []})
self.client.list_endpoints()
- self.assertEqual(self.hook_calls, [
+ assert self.hook_calls == [
'provide-client-params.sagemaker.ListEndpoints'
- ])
+ ]
diff --git a/tests/functional/test_service_alias.py b/tests/functional/test_service_alias.py
index cd58e6ac..4795086c 100644
--- a/tests/functional/test_service_alias.py
+++ b/tests/functional/test_service_alias.py
@@ -10,14 +10,15 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
import botocore.session
from botocore.handlers import SERVICE_NAME_ALIASES
-def test_can_use_service_alias():
+@pytest.mark.parametrize("alias,name", SERVICE_NAME_ALIASES.items())
+def test_can_use_service_alias(alias, name):
session = botocore.session.get_session()
- for (alias, name) in SERVICE_NAME_ALIASES.items():
- _instantiates_the_same_client(session, name, alias)
+ _instantiates_the_same_client(session, name, alias)
def _instantiates_the_same_client(session, service_name, service_alias):
diff --git a/tests/functional/test_service_names.py b/tests/functional/test_service_names.py
index 09ba40d3..4a7ff6d3 100644
--- a/tests/functional/test_service_names.py
+++ b/tests/functional/test_service_names.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
+import pytest
from botocore.session import get_session
@@ -54,10 +55,13 @@ def _assert_name_pattern(service_name):
VALID_NAME_EXPLANATION
-def test_service_names_are_valid():
+def _all_service_names():
session = get_session()
loader = session.get_component('data_loader')
- service_names = loader.list_available_services('service-2')
- for service_name in service_names:
- _assert_name_length(service_name)
- _assert_name_pattern(service_name)
+ return loader.list_available_services('service-2')
+
+
+@pytest.mark.parametrize("service_name", _all_service_names())
+def test_service_names_are_valid(service_name):
+ _assert_name_length(service_name)
+ _assert_name_pattern(service_name)
diff --git a/tests/functional/test_session.py b/tests/functional/test_session.py
index 7eb67de5..83df38b6 100644
--- a/tests/functional/test_session.py
+++ b/tests/functional/test_session.py
@@ -31,7 +31,7 @@ class TestSession(unittest.TestCase):
def test_profile_precedence(self):
self.environ['AWS_PROFILE'] = 'from_env_var'
self.session.set_config_variable('profile', 'from_session_instance')
- self.assertEqual(self.session.profile, 'from_session_instance')
+ assert self.session.profile == 'from_session_instance'
def test_credentials_with_profile_precedence(self):
self.environ['AWS_PROFILE'] = 'from_env_var'
@@ -39,8 +39,8 @@ class TestSession(unittest.TestCase):
try:
creds = self.session.get_credentials()
except ProfileNotFound as e:
- self.assertNotIn('from_env_var', str(e))
- self.assertIn('from_session_instance', str(e))
+ assert 'from_env_var' not in str(e)
+ assert 'from_session_instance' in str(e)
def test_session_profile_overrides_env_vars(self):
# If the ".profile" attribute is set then the associated
@@ -63,8 +63,8 @@ class TestSession(unittest.TestCase):
self.session.set_config_variable('profile',
'from_session_instance')
creds = self.session.get_credentials()
- self.assertEqual(creds.access_key, 'shared_creds_akid')
- self.assertEqual(creds.secret_key, 'shared_creds_sak')
+ assert creds.access_key == 'shared_creds_akid'
+ assert creds.secret_key == 'shared_creds_sak'
def test_profile_does_not_win_if_all_from_env_vars(self):
# Creds should be pulled from the env vars because
@@ -88,19 +88,19 @@ class TestSession(unittest.TestCase):
creds = self.session.get_credentials()
- self.assertEqual(creds.access_key, 'env_var_akid')
- self.assertEqual(creds.secret_key, 'env_var_sak')
+ assert creds.access_key == 'env_var_akid'
+ assert creds.secret_key == 'env_var_sak'
def test_provides_available_regions_for_same_endpoint_prefix(self):
regions = self.session.get_available_regions('s3')
- self.assertTrue(regions)
+ assert regions
def test_provides_available_regions_for_different_endpoint_prefix(self):
regions = self.session.get_available_regions('elb')
- self.assertTrue(regions)
+ assert regions
def test_does_not_provide_regions_for_mismatch_service_name(self):
# elb's endpoint prefix is elasticloadbalancing, but users should
# still be using the service name when getting regions
regions = self.session.get_available_regions('elasticloadbalancing')
- self.assertEqual(regions, [])
+ assert regions == []
diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py
index bab6a3f1..146a1f65 100644
--- a/tests/functional/test_six_imports.py
+++ b/tests/functional/test_six_imports.py
@@ -1,21 +1,22 @@
import os
import botocore
import ast
+import pytest
ROOTDIR = os.path.dirname(botocore.__file__)
-def test_no_bare_six_imports():
- for rootdir, dirnames, filenames in os.walk(ROOTDIR):
- if 'vendored' in dirnames:
- # We don't need to lint our vendored packages.
- dirnames.remove('vendored')
- for filename in filenames:
- if not filename.endswith('.py'):
- continue
- fullname = os.path.join(rootdir, filename)
- _assert_no_bare_six_imports(fullname)
+@pytest.mark.parametrize("rootdir,dirnames,filenames", os.walk(ROOTDIR))
+def test_no_bare_six_imports(rootdir, dirnames, filenames):
+ if 'vendored' in dirnames:
+ # We don't need to lint our vendored packages.
+ dirnames.remove('vendored')
+ for filename in filenames:
+ if not filename.endswith('.py'):
+ continue
+ fullname = os.path.join(rootdir, filename)
+ _assert_no_bare_six_imports(fullname)
def _assert_no_bare_six_imports(filename):
diff --git a/tests/functional/test_sts.py b/tests/functional/test_sts.py
index 8c7b55d7..d8377b57 100644
--- a/tests/functional/test_sts.py
+++ b/tests/functional/test_sts.py
@@ -87,12 +87,9 @@ class TestSTSEndpoints(BaseSessionTest):
http_stubber.add_response(body=body)
sts.get_caller_identity()
captured_request = http_stubber.requests[0]
- self.assertEqual(captured_request.url, expected_url)
+ assert captured_request.url == expected_url
if expected_signing_region:
- self.assertEqual(
- self._get_signing_region(captured_request),
- expected_signing_region
- )
+ assert self._get_signing_region(captured_request) == expected_signing_region
def _get_signing_region(self, request):
authorization_val = request.headers['Authorization'].decode('utf-8')
diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py
index 4a30dcaa..e8087ed3 100644
--- a/tests/functional/test_stub.py
+++ b/tests/functional/test_stub.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import botocore.config
from tests import unittest
+import pytest
import botocore
import botocore.session
@@ -42,7 +43,7 @@ class TestStubber(unittest.TestCase):
self.stubber.add_response('list_objects', service_response)
self.stubber.activate()
response = self.client.list_objects(Bucket='foo')
- self.assertEqual(response, service_response)
+ assert response == service_response
def test_context_manager_returns_response(self):
service_response = {'ResponseMetadata': {'foo': 'bar'}}
@@ -50,13 +51,12 @@ class TestStubber(unittest.TestCase):
with self.stubber:
response = self.client.list_objects(Bucket='foo')
- self.assertEqual(response, service_response)
+ assert response == service_response
def test_activated_stubber_errors_with_no_registered_stubs(self):
self.stubber.activate()
# Params one per line for readability.
- with six.assertRaisesRegex(self, UnStubbedResponseError,
- "Unexpected API Call"):
+ with pytest.raises(UnStubbedResponseError, match=r"Unexpected API Call"):
self.client.list_objects(
Bucket='asdfasdfasdfasdf',
Delimiter='asdfasdfasdfasdf',
@@ -68,7 +68,7 @@ class TestStubber(unittest.TestCase):
self.stubber.activate()
self.client.list_objects(Bucket='foo')
- with self.assertRaises(UnStubbedResponseError):
+ with pytest.raises(UnStubbedResponseError):
self.client.list_objects(Bucket='foo')
def test_client_error_response(self):
@@ -78,7 +78,7 @@ class TestStubber(unittest.TestCase):
'list_objects', error_code, error_message)
self.stubber.activate()
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.list_objects(Bucket='foo')
def test_can_add_expected_params_to_client_error(self):
@@ -87,7 +87,7 @@ class TestStubber(unittest.TestCase):
expected_params={'Bucket': 'foo'}
)
self.stubber.activate()
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.list_objects(Bucket='foo')
def test_can_expected_param_fails_in_client_error(self):
@@ -99,7 +99,7 @@ class TestStubber(unittest.TestCase):
# We expect an AssertionError instead of a ClientError
# because we're calling the operation with the wrong
# param value.
- with self.assertRaises(AssertionError):
+ with pytest.raises(AssertionError):
self.client.list_objects(Bucket='wrong-argument-value')
def test_expected_params_success(self):
@@ -111,7 +111,7 @@ class TestStubber(unittest.TestCase):
# This should be called successfully with no errors being thrown
# for mismatching expected params.
response = self.client.list_objects(Bucket='foo')
- self.assertEqual(response, service_response)
+ assert response == service_response
def test_expected_params_fail(self):
service_response = {}
@@ -120,8 +120,7 @@ class TestStubber(unittest.TestCase):
'list_objects', service_response, expected_params)
self.stubber.activate()
# This should call should raise an for mismatching expected params.
- with six.assertRaisesRegex(self, StubResponseError,
- "{'Bucket': 'bar'},\n"):
+ with pytest.raises(StubResponseError, match=r"{'Bucket': 'bar'},\n"):
self.client.list_objects(Bucket='foo')
def test_expected_params_mixed_with_errors_responses(self):
@@ -140,12 +139,11 @@ class TestStubber(unittest.TestCase):
self.stubber.activate()
# The first call should throw and error as expected.
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.list_objects(Bucket='foo')
# The second call should throw an error for unexpected parameters
- with six.assertRaisesRegex(self, StubResponseError,
- 'Expected parameters'):
+ with pytest.raises(StubResponseError, match=r'Expected parameters'):
self.client.list_objects(Bucket='foo')
def test_can_continue_to_call_after_expected_params_fail(self):
@@ -157,7 +155,7 @@ class TestStubber(unittest.TestCase):
self.stubber.activate()
# Throw an error for unexpected parameters
- with self.assertRaises(StubResponseError):
+ with pytest.raises(StubResponseError):
self.client.list_objects(Bucket='foo')
# The stubber should still have the responses queued up
@@ -174,7 +172,7 @@ class TestStubber(unittest.TestCase):
self.stubber.activate()
# Throw an error for invalid parameters
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.client.list_objects(Buck='bar')
def test_any_ignores_param_for_validation(self):
@@ -245,7 +243,7 @@ class TestStubber(unittest.TestCase):
"stub.ANY failed to ignore nested parameter for validation.")
def test_ANY_repr(self):
- self.assertEqual(repr(stub.ANY), '<ANY>')
+ assert repr(stub.ANY) == '<ANY>'
def test_none_param(self):
service_response = {}
@@ -256,7 +254,7 @@ class TestStubber(unittest.TestCase):
self.stubber.activate()
# Throw an error for invalid parameters
- with self.assertRaises(StubAssertionError):
+ with pytest.raises(StubAssertionError):
self.client.list_objects(Buck='bar')
def test_many_expected_params(self):
@@ -286,8 +284,7 @@ class TestStubber(unittest.TestCase):
'Key': 'mykey'
}
)
- self.assertEqual(
- url, 'https://s3.amazonaws.com/mybucket/mykey')
+ assert url == 'https://s3.amazonaws.com/mybucket/mykey'
except StubResponseError:
self.fail(
'Stubbed responses should not be required for generating '
@@ -310,10 +307,9 @@ class TestStubber(unittest.TestCase):
'Key': 'myotherkey'
}
)
- self.assertEqual(
- url, 'https://s3.amazonaws.com/myotherbucket/myotherkey')
+ assert url == 'https://s3.amazonaws.com/myotherbucket/myotherkey'
actual_response = self.client.list_objects(**expected_params)
- self.assertEqual(desired_response, actual_response)
+ assert desired_response == actual_response
self.stubber.assert_no_pending_responses()
def test_parse_get_bucket_location(self):
@@ -323,7 +319,7 @@ class TestStubber(unittest.TestCase):
'get_bucket_location', error_code, error_message)
self.stubber.activate()
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.get_bucket_location(Bucket='foo')
def test_parse_get_bucket_location_returns_response(self):
@@ -331,5 +327,5 @@ class TestStubber(unittest.TestCase):
self.stubber.add_response('get_bucket_location',service_response)
self.stubber.activate()
response = self.client.get_bucket_location(Bucket='foo')
- self.assertEqual(response, service_response)
+ assert response == service_response
diff --git a/tests/functional/test_utils.py b/tests/functional/test_utils.py
index b9b9c2b6..dd78f6fc 100644
--- a/tests/functional/test_utils.py
+++ b/tests/functional/test_utils.py
@@ -42,7 +42,7 @@ class TestFileWebIdentityTokenLoader(unittest.TestCase):
def test_can_load_token(self):
loader = FileWebIdentityTokenLoader(self.token_file)
token = loader()
- self.assertEqual(self.token, token)
+ assert self.token == token
class TestInstanceMetadataFetcher(unittest.TestCase):
@@ -51,15 +51,15 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
fetcher = InstanceMetadataFetcher()
send_mock.side_effect = ConnectionClosedError(endpoint_url="foo")
creds = fetcher.retrieve_iam_role_credentials()
- self.assertEquals(send_mock.call_count, 2)
+ assert send_mock.call_count == 2
for call_instance in send_mock.call_args_list:
self.assertTrue(call_instance[0][0].url.startswith(fetcher.get_base_url()))
- self.assertEquals(creds, {})
+ assert creds == {}
def test_catch_invalid_imds_error(self):
with mock.patch('botocore.httpsession.URLLib3Session.send') as send_mock:
fetcher = InstanceMetadataFetcher()
e = LocationParseError(location="foo")
send_mock.side_effect = HTTPClientError(error=e)
- with self.assertRaises(InvalidIMDSEndpointError):
+ with pytest.raises(InvalidIMDSEndpointError):
fetcher.retrieve_iam_role_credentials()
diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py
index 87646926..01782007 100644
--- a/tests/functional/test_waiter_config.py
+++ b/tests/functional/test_waiter_config.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import jmespath
from jsonschema import Draft4Validator
+import pytest
import botocore.session
from botocore.exceptions import UnknownServiceError
@@ -82,25 +83,25 @@ WAITER_SCHEMA = {
}
-def test_lint_waiter_configs():
+@pytest.mark.parametrize("service_name", botocore.session.get_session().get_available_services())
+def test_lint_waiter_configs(service_name):
session = botocore.session.get_session()
validator = Draft4Validator(WAITER_SCHEMA)
- for service_name in session.get_available_services():
- client = session.create_client(service_name, 'us-east-1')
- service_model = client.meta.service_model
- try:
- # We use the loader directly here because we need the entire
- # json document, not just the portions exposed (either
- # internally or externally) by the WaiterModel class.
- loader = session.get_component('data_loader')
- waiter_model = loader.load_service_model(
- service_name, 'waiters-2')
- except UnknownServiceError:
- # The service doesn't have waiters
- continue
- _validate_schema(validator, waiter_model)
- for waiter_name in client.waiter_names:
- _lint_single_waiter(client, waiter_name, service_model)
+ client = session.create_client(service_name, 'us-east-1')
+ service_model = client.meta.service_model
+ try:
+ # We use the loader directly here because we need the entire
+ # json document, not just the portions exposed (either
+ # internally or externally) by the WaiterModel class.
+ loader = session.get_component('data_loader')
+ waiter_model = loader.load_service_model(
+ service_name, 'waiters-2')
+ except UnknownServiceError:
+ # The service doesn't have waiters
+ return
+ _validate_schema(validator, waiter_model)
+ for waiter_name in client.waiter_names:
+ _lint_single_waiter(client, waiter_name, service_model)
def _lint_single_waiter(client, waiter_name, service_model):
--
2.29.2
From 93a8dc2ee98184c848eda6942b0dfc83c7a7c73a Mon Sep 17 00:00:00 2001
From: Zidaan Dutta <ziddutta@amazon.com>
Date: Mon, 19 Oct 2020 19:18:36 -0400
Subject: [PATCH 07/14] pytest migration of botocore integration tests
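This part of the series converts the remaining unittest/nose assertion
helpers in the integration tests to plain pytest idioms. As a rough,
self-contained sketch of the mapping applied in the hunks below (the
_double helper is hypothetical and only illustrates the pattern; it is
not part of botocore):

    import pytest

    def _double(value):
        # Hypothetical helper, used only to demonstrate the assertions.
        if not isinstance(value, int):
            raise ValueError("Expected an integer")
        return value * 2

    def test_double():
        # was: self.assertEqual(_double(2), 4)
        assert _double(2) == 4

    def test_double_rejects_strings():
        # was: with self.assertRaises(ValueError): ...
        # or:  six.assertRaisesRegex(self, ValueError, "Expected")
        with pytest.raises(ValueError, match="Expected"):
            _double("2")

Nose-style test generators (functions that yield sub-tests) are rewritten
as @pytest.mark.parametrize cases instead.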
---
tests/integration/test_apigateway.py | 2 +-
tests/integration/test_client.py | 34 +--
tests/integration/test_client_http.py | 11 +-
tests/integration/test_cloudformation.py | 3 +-
tests/integration/test_credentials.py | 16 +-
tests/integration/test_ec2.py | 18 +-
tests/integration/test_elastictranscoder.py | 6 +-
tests/integration/test_emr.py | 20 +-
tests/integration/test_glacier.py | 13 +-
tests/integration/test_loaders.py | 17 +-
tests/integration/test_rds.py | 8 +-
tests/integration/test_route53.py | 7 +-
tests/integration/test_s3.py | 250 +++++++++-----------
tests/integration/test_session.py | 10 +-
tests/integration/test_smoke.py | 46 ++--
tests/integration/test_sts.py | 10 +-
tests/integration/test_utils.py | 14 +-
tests/integration/test_waiters.py | 6 +-
18 files changed, 238 insertions(+), 253 deletions(-)
diff --git a/tests/integration/test_apigateway.py b/tests/integration/test_apigateway.py
index c95d0681..1dd032e8 100644
--- a/tests/integration/test_apigateway.py
+++ b/tests/integration/test_apigateway.py
@@ -76,4 +76,4 @@ class TestApigateway(unittest.TestCase):
uri='https://api.endpoint.com'
)
# Assert the response was successful by checking the integration type
- self.assertEqual(response['type'], 'HTTP')
+ assert response['type'] == 'HTTP'
diff --git a/tests/integration/test_client.py b/tests/integration/test_client.py
index cdaa4286..92f696b0 100644
--- a/tests/integration/test_client.py
+++ b/tests/integration/test_client.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import logging
import datetime
+import pytest
from tests import unittest, random_chars
import botocore.session
@@ -38,8 +39,8 @@ class TestResponseLog(unittest.TestCase):
session.set_stream_logger('', logging.DEBUG, debug_log)
client.list_buckets()
debug_log_contents = debug_log.getvalue()
- self.assertIn('Response headers', debug_log_contents)
- self.assertIn('Response body', debug_log_contents)
+ assert 'Response headers' in debug_log_contents
+ assert 'Response body' in debug_log_contents
class TestAcceptedDateTimeFormats(unittest.TestCase):
@@ -50,26 +51,26 @@ class TestAcceptedDateTimeFormats(unittest.TestCase):
def test_accepts_datetime_object(self):
response = self.client.list_clusters(
CreatedAfter=datetime.datetime.now())
- self.assertIn('Clusters', response)
+ assert 'Clusters' in response
def test_accepts_epoch_format(self):
response = self.client.list_clusters(CreatedAfter=0)
- self.assertIn('Clusters', response)
+ assert 'Clusters' in response
def test_accepts_iso_8601_unaware(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00')
- self.assertIn('Clusters', response)
+ assert 'Clusters' in response
def test_accepts_iso_8601_utc(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00Z')
- self.assertIn('Clusters', response)
+ assert 'Clusters' in response
def test_accepts_iso_8701_local(self):
response = self.client.list_clusters(
CreatedAfter='2014-01-01T00:00:00-08:00')
- self.assertIn('Clusters', response)
+ assert 'Clusters' in response
class TestClientErrors(unittest.TestCase):
@@ -79,21 +80,20 @@ class TestClientErrors(unittest.TestCase):
def test_region_mentioned_in_invalid_region(self):
client = self.session.create_client(
'cloudformation', region_name='us-east-999')
- with six.assertRaisesRegex(self, EndpointConnectionError,
- 'Could not connect to the endpoint URL'):
+ with pytest.raises(EndpointConnectionError, match=r'Could not connect to the endpoint URL'):
client.list_stacks()
def test_client_modeled_exception(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2')
- with self.assertRaises(client.exceptions.ResourceNotFoundException):
+ with pytest.raises(client.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
def test_client_modeleded_exception_with_differing_code(self):
client = self.session.create_client('iam', region_name='us-west-2')
# The NoSuchEntityException should be raised on NoSuchEntity error
# code.
- with self.assertRaises(client.exceptions.NoSuchEntityException):
+ with pytest.raises(client.exceptions.NoSuchEntityException):
client.get_role(RoleName="NonexistentIAMRole")
def test_raises_general_client_error_for_non_modeled_exception(self):
@@ -101,14 +101,14 @@ class TestClientErrors(unittest.TestCase):
try:
client.describe_regions(DryRun=True)
except client.exceptions.ClientError as e:
- self.assertIs(e.__class__, ClientError)
+ assert e.__class__ is ClientError
def test_can_catch_client_exceptions_across_two_different_clients(self):
client = self.session.create_client(
'dynamodb', region_name='us-west-2')
client2 = self.session.create_client(
'dynamodb', region_name='us-west-2')
- with self.assertRaises(client2.exceptions.ResourceNotFoundException):
+ with pytest.raises(client2.exceptions.ResourceNotFoundException):
client.describe_table(TableName="NonexistentTable")
@@ -118,12 +118,12 @@ class TestClientMeta(unittest.TestCase):
def test_region_name_on_meta(self):
client = self.session.create_client('s3', 'us-west-2')
- self.assertEqual(client.meta.region_name, 'us-west-2')
+ assert client.meta.region_name == 'us-west-2'
def test_endpoint_url_on_meta(self):
client = self.session.create_client('s3', 'us-west-2',
endpoint_url='https://foo')
- self.assertEqual(client.meta.endpoint_url, 'https://foo')
+ assert client.meta.endpoint_url == 'https://foo'
class TestClientInjection(unittest.TestCase):
@@ -144,7 +144,7 @@ class TestClientInjection(unittest.TestCase):
client = self.session.create_client('s3', 'us-west-2')
# We should now have access to the extra_client_method above.
- self.assertEqual(client.extra_client_method('foo'), 'foo')
+ assert client.extra_client_method('foo') == 'foo'
class TestMixedEndpointCasing(unittest.TestCase):
@@ -157,4 +157,4 @@ class TestMixedEndpointCasing(unittest.TestCase):
def test_sigv4_is_correct_when_mixed_endpoint_casing(self):
res = self.client.describe_regions()
status_code = res['ResponseMetadata']['HTTPStatusCode']
- self.assertEqual(status_code, 200)
+ assert status_code == 200
diff --git a/tests/integration/test_client_http.py b/tests/integration/test_client_http.py
index 00583d47..326547c3 100644
--- a/tests/integration/test_client_http.py
+++ b/tests/integration/test_client_http.py
@@ -2,6 +2,7 @@ import select
import socket
import contextlib
import threading
+import pytest
from tests import unittest
from contextlib import contextmanager
@@ -76,11 +77,11 @@ class TestClientHTTPBehavior(unittest.TestCase):
self.fail('Fake EC2 service was not called.')
def test_read_timeout_exception(self):
- with self.assertRaises(ReadTimeoutError):
+ with pytest.raises(ReadTimeoutError):
self._read_timeout_server()
def test_old_read_timeout_exception(self):
- with self.assertRaises(requests_exceptions.ReadTimeout):
+ with pytest.raises(requests_exceptions.ReadTimeout):
self._read_timeout_server()
@unittest.skip('The current implementation will fail to timeout on linux')
@@ -105,7 +106,7 @@ class TestClientHTTPBehavior(unittest.TestCase):
with background(no_accept_server):
server_bound_event.wait(timeout=60)
- with self.assertRaises(ConnectTimeoutError):
+ with pytest.raises(ConnectTimeoutError):
client.describe_regions()
client_call_ended_event.set()
@@ -114,7 +115,7 @@ class TestClientHTTPBehavior(unittest.TestCase):
endpoint = 'https://ec2.us-weast-1.amazonaws.com/'
client = self.session.create_client('ec2', endpoint_url=endpoint,
config=config)
- with self.assertRaises(EndpointConnectionError):
+ with pytest.raises(EndpointConnectionError):
client.describe_regions()
def test_bad_status_line(self):
@@ -129,7 +130,7 @@ class TestClientHTTPBehavior(unittest.TestCase):
self.wfile.write(b'garbage')
with background(run_server, args=(BadStatusHandler, self.port)):
- with self.assertRaises(ConnectionClosedError):
+ with pytest.raises(ConnectionClosedError):
BadStatusHandler.event.wait(timeout=60)
client.describe_regions()
diff --git a/tests/integration/test_cloudformation.py b/tests/integration/test_cloudformation.py
index d806e5be..78b40dfa 100644
--- a/tests/integration/test_cloudformation.py
+++ b/tests/integration/test_cloudformation.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest, random_chars
+import pytest
import botocore.session
from botocore.exceptions import ClientError
@@ -24,7 +25,7 @@ class TestCloudformation(unittest.TestCase):
def test_handles_errors_with_template_body(self):
# GetTemplate has a customization in handlers.py, so we're ensuring
# it handles the case when a stack does not exist.
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.get_template(
StackName='does-not-exist-%s' % random_chars(10))
diff --git a/tests/integration/test_credentials.py b/tests/integration/test_credentials.py
index 1f7f4532..75ee2ac3 100644
--- a/tests/integration/test_credentials.py
+++ b/tests/integration/test_credentials.py
@@ -60,8 +60,8 @@ class TestCredentialPrecedence(BaseEnvVar):
s = self.create_session()
credentials = s.get_credentials()
- self.assertEqual(credentials.access_key, 'env')
- self.assertEqual(credentials.secret_key, 'env-secret')
+ assert credentials.access_key == 'env'
+ assert credentials.secret_key == 'env-secret'
@mock.patch('botocore.credentials.Credentials')
def test_access_secret_vs_profile_code(self, credentials_cls):
@@ -83,8 +83,8 @@ class TestCredentialPrecedence(BaseEnvVar):
credentials = s.get_credentials()
- self.assertEqual(credentials.access_key, 'default')
- self.assertEqual(credentials.secret_key, 'default-secret')
+ assert credentials.access_key == 'default'
+ assert credentials.secret_key == 'default-secret'
@mock.patch('botocore.credentials.Credentials')
def test_access_secret_env_vs_code(self, credentials_cls):
@@ -114,8 +114,8 @@ class TestCredentialPrecedence(BaseEnvVar):
credentials = s.get_credentials()
- self.assertEqual(credentials.access_key, 'test')
- self.assertEqual(credentials.secret_key, 'test-secret')
+ assert credentials.access_key == 'test'
+ assert credentials.secret_key == 'test-secret'
def test_honors_aws_shared_credentials_file_env_var(self):
with temporary_file('w') as f:
@@ -127,8 +127,8 @@ class TestCredentialPrecedence(BaseEnvVar):
s = Session()
credentials = s.get_credentials()
- self.assertEqual(credentials.access_key, 'custom1')
- self.assertEqual(credentials.secret_key, 'custom2')
+ assert credentials.access_key == 'custom1'
+ assert credentials.secret_key == 'custom2'
class TestAssumeRoleCredentials(BaseEnvVar):
diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py
index b6fe5826..2e0e765b 100644
--- a/tests/integration/test_ec2.py
+++ b/tests/integration/test_ec2.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest
import itertools
+import pytest
import botocore.session
from botocore.exceptions import ClientError
@@ -28,13 +29,12 @@ class TestEC2(unittest.TestCase):
result = self.client.describe_availability_zones()
zones = list(
sorted(a['ZoneName'] for a in result['AvailabilityZones']))
- self.assertTrue(
- set(['us-west-2a', 'us-west-2b', 'us-west-2c']).issubset(zones))
+ assert set(['us-west-2a', 'us-west-2b', 'us-west-2c']).issubset(zones)
def test_get_console_output_handles_error(self):
# Want to ensure the underlying ClientError is propogated
# on error.
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.get_console_output(InstanceId='i-12345')
@@ -50,8 +50,8 @@ class TestEC2Pagination(unittest.TestCase):
'describe_reserved_instances_offerings')
pages = paginator.paginate()
results = list(itertools.islice(pages, 0, 3))
- self.assertEqual(len(results), 3)
- self.assertTrue(results[0]['NextToken'] != results[1]['NextToken'])
+ assert len(results) == 3
+ assert results[0]['NextToken'] != results[1]['NextToken']
def test_can_paginate_with_page_size(self):
# Using an operation that we know will paginate.
@@ -59,12 +59,12 @@ class TestEC2Pagination(unittest.TestCase):
'describe_reserved_instances_offerings')
pages = paginator.paginate(PaginationConfig={'PageSize': 1})
results = list(itertools.islice(pages, 0, 3))
- self.assertEqual(len(results), 3)
+ assert len(results) == 3
for parsed in results:
reserved_inst_offer = parsed['ReservedInstancesOfferings']
# There should be no more than one reserved instance
# offering on each page.
- self.assertLessEqual(len(reserved_inst_offer), 1)
+ assert len(reserved_inst_offer) <= 1
def test_can_fall_back_to_old_starting_token(self):
# Using an operation that we know will paginate.
@@ -74,8 +74,8 @@ class TestEC2Pagination(unittest.TestCase):
try:
results = list(itertools.islice(pages, 0, 3))
- self.assertEqual(len(results), 3)
- self.assertTrue(results[0]['NextToken'] != results[1]['NextToken'])
+ assert len(results) == 3
+ assert results[0]['NextToken'] != results[1]['NextToken']
except ValueError:
self.fail("Old style paginator failed.")
diff --git a/tests/integration/test_elastictranscoder.py b/tests/integration/test_elastictranscoder.py
index c84cca70..dca0256b 100644
--- a/tests/integration/test_elastictranscoder.py
+++ b/tests/integration/test_elastictranscoder.py
@@ -57,11 +57,11 @@ class TestElasticTranscoder(unittest.TestCase):
def test_list_streams(self):
parsed = self.client.list_pipelines()
- self.assertIn('Pipelines', parsed)
+ assert 'Pipelines' in parsed
def test_list_presets(self):
parsed = self.client.list_presets(Ascending='true')
- self.assertIn('Presets', parsed)
+ assert 'Presets' in parsed
def test_create_pipeline(self):
# In order to create a pipeline, we need to create 2 s3 buckets
@@ -78,7 +78,7 @@ class TestElasticTranscoder(unittest.TestCase):
'Warning': '', 'Error': ''})
pipeline_id = parsed['Pipeline']['Id']
self.addCleanup(self.client.delete_pipeline, Id=pipeline_id)
- self.assertIn('Pipeline', parsed)
+ assert 'Pipeline' in parsed
if __name__ == '__main__':
diff --git a/tests/integration/test_emr.py b/tests/integration/test_emr.py
index d3b898b1..071dfb06 100644
--- a/tests/integration/test_emr.py
+++ b/tests/integration/test_emr.py
@@ -11,22 +11,24 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
import botocore.session
from botocore.paginate import PageIterator
from botocore.exceptions import OperationNotPageableError
+EMR_REGIONS = ['us-east-1', 'us-west-2', 'ap-northeast-1',
+               'ap-southeast-1', 'ap-southeast-2', 'sa-east-1',
+               'eu-west-1', 'eu-central-1']
-def test_emr_endpoints_work_with_py26():
+@pytest.mark.parametrize('region', EMR_REGIONS)
+def test_emr_endpoints_work_with_py26(region):
# Verify that we can talk to all currently supported EMR endpoints.
# Python2.6 has an SSL cert bug where it can't read the SAN of
# certain SSL certs. We therefore need to always use the CN
# as the hostname.
session = botocore.session.get_session()
- for region in ['us-east-1', 'us-west-2', 'us-west-2', 'ap-northeast-1',
- 'ap-southeast-1', 'ap-southeast-2', 'sa-east-1', 'eu-west-1',
- 'eu-central-1']:
- yield _test_can_list_clusters_in_region, session, region
+ _test_can_list_clusters_in_region(session, region)
def _test_can_list_clusters_in_region(session, region):
@@ -47,18 +49,18 @@ class TestEMRGetExtraResources(unittest.TestCase):
# Using an operation that we know will paginate.
paginator = self.client.get_paginator('list_clusters')
page_iterator = paginator.paginate()
- self.assertIsInstance(page_iterator, PageIterator)
+ assert isinstance(page_iterator, PageIterator)
def test_operation_cant_be_paginated(self):
- with self.assertRaises(OperationNotPageableError):
+ with pytest.raises(OperationNotPageableError):
self.client.get_paginator('add_instance_groups')
def test_can_get_waiters(self):
waiter = self.client.get_waiter('cluster_running')
- self.assertTrue(hasattr(waiter, 'wait'))
+ assert hasattr(waiter, 'wait')
def test_waiter_does_not_exist(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.client.get_waiter('does_not_exist')
diff --git a/tests/integration/test_glacier.py b/tests/integration/test_glacier.py
index 23a7f348..da73cd87 100644
--- a/tests/integration/test_glacier.py
+++ b/tests/integration/test_glacier.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
from botocore.exceptions import ClientError
from botocore.vendored import six
@@ -38,10 +39,10 @@ class TestGlacier(unittest.TestCase):
def test_can_list_vaults_without_account_id(self):
response = self.client.list_vaults()
- self.assertIn('VaultList', response)
+ assert 'VaultList' in response
def test_can_handle_error_responses(self):
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.list_vaults(accountId='asdf')
def test_can_upload_archive(self):
@@ -49,21 +50,21 @@ class TestGlacier(unittest.TestCase):
response = self.client.upload_archive(vaultName=self.VAULT_NAME,
archiveDescription='test upload',
body=body)
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 201)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 201
archive_id = response['archiveId']
response = self.client.delete_archive(vaultName=self.VAULT_NAME,
archiveId=archive_id)
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 204
def test_can_upload_archive_from_bytes(self):
response = self.client.upload_archive(vaultName=self.VAULT_NAME,
archiveDescription='test upload',
body=b'bytes body')
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 201)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 201
archive_id = response['archiveId']
response = self.client.delete_archive(vaultName=self.VAULT_NAME,
archiveId=archive_id)
- self.assertEqual(response['ResponseMetadata']['HTTPStatusCode'], 204)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 204
if __name__ == '__main__':
diff --git a/tests/integration/test_loaders.py b/tests/integration/test_loaders.py
index 33804fd8..c4e7be08 100644
--- a/tests/integration/test_loaders.py
+++ b/tests/integration/test_loaders.py
@@ -36,32 +36,31 @@ class TestLoaderBasicFunctionality(unittest.TestCase):
self.patched.stop()
def test_search_path_has_at_least_one_entry(self):
- self.assertTrue(len(self.loader.search_paths) > 0)
+ assert len(self.loader.search_paths) > 0
def test_can_list_available_services(self):
# We don't want an exact check, as this list changes over time.
# We just need a basic sanity check.
available_services = self.loader.list_available_services(
type_name='service-2')
- self.assertIn('ec2', available_services)
- self.assertIn('s3', available_services)
+ assert 'ec2' in available_services
+ assert 's3' in available_services
def test_can_determine_latest_version(self):
api_versions = self.loader.list_api_versions(
service_name='ec2', type_name='service-2')
- self.assertEqual(
- self.loader.determine_latest_version(
- service_name='ec2', type_name='service-2'),
- max(api_versions))
+ assert self.loader.determine_latest_version(
+ service_name='ec2',
+ type_name='service-2') == max(api_versions)
def test_can_load_service_model(self):
waiters = self.loader.load_service_model(
service_name='ec2', type_name='waiters-2')
- self.assertIn('waiters', waiters)
+ assert 'waiters' in waiters
def test_can_load_data(self):
api_version = self.loader.determine_latest_version(
service_name='ec2', type_name='service-2')
data = self.loader.load_data(
os.path.join('ec2', api_version, 'service-2'))
- self.assertIn('metadata', data)
+ assert 'metadata' in data
diff --git a/tests/integration/test_rds.py b/tests/integration/test_rds.py
index 0f15ccd4..931e6f4d 100644
--- a/tests/integration/test_rds.py
+++ b/tests/integration/test_rds.py
@@ -27,16 +27,16 @@ class TestRDSPagination(unittest.TestCase):
'describe_reserved_db_instances_offerings')
generator = paginator.paginate()
results = list(itertools.islice(generator, 0, 3))
- self.assertEqual(len(results), 3)
- self.assertTrue(results[0]['Marker'] != results[1]['Marker'])
+ assert len(results) == 3
+ assert results[0]['Marker'] != results[1]['Marker']
def test_can_paginate_orderable_db(self):
paginator = self.client.get_paginator(
'describe_orderable_db_instance_options')
generator = paginator.paginate(Engine='mysql')
results = list(itertools.islice(generator, 0, 2))
- self.assertEqual(len(results), 2)
- self.assertTrue(results[0].get('Marker') != results[1].get('Marker'))
+ assert len(results) == 2
+ assert results[0].get('Marker') != results[1].get('Marker')
if __name__ == '__main__':
diff --git a/tests/integration/test_route53.py b/tests/integration/test_route53.py
index 54165632..5d01d35b 100644
--- a/tests/integration/test_route53.py
+++ b/tests/integration/test_route53.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
import botocore.session
from botocore.exceptions import ClientError
@@ -26,7 +27,7 @@ class TestRoute53Pagination(unittest.TestCase):
# still works without any issues.
paginator = self.client.get_paginator('list_hosted_zones')
results = list(paginator.paginate(PaginationConfig={'MaxItems': '1'}))
- self.assertTrue(len(results) >= 0)
+ assert len(results) >= 0
def test_paginate_with_deprecated_paginator_and_limited_input_tokens(self):
paginator = self.client.get_paginator('list_resource_record_sets')
@@ -34,7 +35,7 @@ class TestRoute53Pagination(unittest.TestCase):
# We're making sure the paginator gets set without failing locally, so
# a ClientError is acceptable. In this case, the Hosted Zone specified
# does not exist.
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
results = list(paginator.paginate(
PaginationConfig={
'MaxItems': '1',
@@ -42,7 +43,7 @@ class TestRoute53Pagination(unittest.TestCase):
},
HostedZoneId="foo"
))
- self.assertTrue(len(results) >= 0)
+ assert len(results) >= 0
if __name__ == '__main__':
diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py
index fea3e061..5f3a5fd6 100644
--- a/tests/integration/test_s3.py
+++ b/tests/integration/test_s3.py
@@ -132,10 +132,7 @@ class BaseS3ClientTest(unittest.TestCase):
self.client = self.session.create_client('s3', region_name=self.region)
def assert_status_code(self, response, status_code):
- self.assertEqual(
- response['ResponseMetadata']['HTTPStatusCode'],
- status_code
- )
+ assert response['ResponseMetadata']['HTTPStatusCode'] == status_code
def create_bucket(self, region_name, bucket_name=None, client=None):
bucket_client = client or self.client
@@ -250,8 +247,8 @@ class TestS3BaseWithBucket(BaseS3ClientTest):
PaginationConfig={
'MaxItems': max_items})
iterators = pages.result_key_iters()
- self.assertEqual(len(iterators), 2)
- self.assertEqual(iterators[0].result_key.expression, 'Uploads')
+ assert len(iterators) == 2
+ assert iterators[0].result_key.expression == 'Uploads'
# It sometimes takes a while for all the uploads to show up,
# especially if the upload was just created. If we don't
# see the expected amount, we retry up to num_attempts time
@@ -292,13 +289,13 @@ class TestS3Buckets(TestS3BaseWithBucket):
result = self.client.list_buckets()
# Can't really assume anything about whether or not they have buckets,
# but we can assume something about the structure of the response.
- self.assertEqual(sorted(list(result.keys())),
- ['Buckets', 'Owner', 'ResponseMetadata'])
+ keys = ['Buckets', 'Owner', 'ResponseMetadata']
+ assert sorted(list(result.keys())) == keys
def test_can_get_bucket_location(self):
result = self.client.get_bucket_location(Bucket=self.bucket_name)
- self.assertIn('LocationConstraint', result)
- self.assertEqual(result['LocationConstraint'], self.region)
+ assert 'LocationConstraint' in result
+ assert result['LocationConstraint'] == self.region
class TestS3Objects(TestS3BaseWithBucket):
@@ -311,13 +308,13 @@ class TestS3Objects(TestS3BaseWithBucket):
self.create_object(key_name=key_name)
bucket_contents = self.client.list_objects(
Bucket=self.bucket_name)['Contents']
- self.assertEqual(len(bucket_contents), 1)
- self.assertEqual(bucket_contents[0]['Key'], 'a+b/foo')
+ assert len(bucket_contents) == 1
+ assert bucket_contents[0]['Key'] == 'a+b/foo'
subdir_contents = self.client.list_objects(
Bucket=self.bucket_name, Prefix='a+b')['Contents']
- self.assertEqual(len(subdir_contents), 1)
- self.assertEqual(subdir_contents[0]['Key'], 'a+b/foo')
+ assert len(subdir_contents) == 1
+ assert subdir_contents[0]['Key'] == 'a+b/foo'
response = self.client.delete_object(
Bucket=self.bucket_name, Key=key_name)
@@ -334,10 +331,10 @@ class TestS3Objects(TestS3BaseWithBucket):
generator = paginator.paginate(MaxKeys=1,
Bucket=self.bucket_name)
responses = list(generator)
- self.assertEqual(len(responses), 5, responses)
+ assert len(responses) == 5, responses
key_names = [el['Contents'][0]['Key']
for el in responses]
- self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
+ assert key_names == ['key0', 'key1', 'key2', 'key3', 'key4']
@pytest.mark.slow
def test_can_paginate_with_page_size(self):
@@ -350,11 +347,11 @@ class TestS3Objects(TestS3BaseWithBucket):
generator = paginator.paginate(PaginationConfig={'PageSize': 1},
Bucket=self.bucket_name)
responses = list(generator)
- self.assertEqual(len(responses), 5, responses)
+ assert len(responses) == 5, responses
data = [r for r in responses]
key_names = [el['Contents'][0]['Key']
for el in data]
- self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4'])
+ assert key_names == ['key0', 'key1', 'key2', 'key3', 'key4']
@pytest.mark.slow
def test_result_key_iters(self):
@@ -376,8 +373,8 @@ class TestS3Objects(TestS3BaseWithBucket):
for k, val in zip(key_names, vals):
response.setdefault(k.expression, [])
response[k.expression].append(val)
- self.assertIn('Contents', response)
- self.assertIn('CommonPrefixes', response)
+ assert 'Contents' in response
+ assert 'CommonPrefixes' in response
@pytest.mark.slow
def test_can_get_and_put_object(self):
@@ -386,7 +383,7 @@ class TestS3Objects(TestS3BaseWithBucket):
data = self.client.get_object(
Bucket=self.bucket_name, Key='foobarbaz')
- self.assertEqual(data['Body'].read().decode('utf-8'), 'body contents')
+ assert data['Body'].read().decode('utf-8') == 'body contents'
def test_can_put_large_string_body_on_new_bucket(self):
body = '*' * (5 * (1024 ** 2))
@@ -404,8 +401,8 @@ class TestS3Objects(TestS3BaseWithBucket):
body = response['Body']
# Am able to set a socket timeout
body.set_socket_timeout(10)
- self.assertEqual(body.read(amt=1).decode('utf-8'), 'b')
- self.assertEqual(body.read().decode('utf-8'), 'ody contents')
+ assert body.read(amt=1).decode('utf-8') == 'b'
+ assert body.read().decode('utf-8') == 'ody contents'
def test_paginate_max_items(self):
self.create_multipart_upload('foo/key1')
@@ -430,7 +427,7 @@ class TestS3Objects(TestS3BaseWithBucket):
pages = paginator.paginate(PaginationConfig={'MaxItems': 1},
Bucket=self.bucket_name)
full_result = pages.build_full_result()
- self.assertEqual(len(full_result['Uploads']), 1)
+ assert len(full_result['Uploads']) == 1
def test_paginate_within_page_boundaries(self):
self.create_object('a')
@@ -462,21 +459,21 @@ class TestS3Objects(TestS3BaseWithBucket):
Bucket=self.bucket_name)
fourth = pages.build_full_result()
- self.assertEqual(first['Contents'][-1]['Key'], 'a')
- self.assertEqual(second['Contents'][-1]['Key'], 'b')
- self.assertEqual(third['Contents'][-1]['Key'], 'c')
- self.assertEqual(fourth['Contents'][-1]['Key'], 'd')
+ assert first['Contents'][-1]['Key'] == 'a'
+ assert second['Contents'][-1]['Key'] == 'b'
+ assert third['Contents'][-1]['Key'] == 'c'
+ assert fourth['Contents'][-1]['Key'] == 'd'
def test_unicode_key_put_list(self):
# Verify we can upload a key with a unicode char and list it as well.
key_name = u'\u2713'
self.create_object(key_name)
parsed = self.client.list_objects(Bucket=self.bucket_name)
- self.assertEqual(len(parsed['Contents']), 1)
- self.assertEqual(parsed['Contents'][0]['Key'], key_name)
+ assert len(parsed['Contents']) == 1
+ assert parsed['Contents'][0]['Key'] == key_name
parsed = self.client.get_object(
Bucket=self.bucket_name, Key=key_name)
- self.assertEqual(parsed['Body'].read().decode('utf-8'), 'foo')
+ assert parsed['Body'].read().decode('utf-8') == 'foo'
def test_unicode_system_character(self):
# Verify we can use a unicode system character which would normally
@@ -485,13 +482,13 @@ class TestS3Objects(TestS3BaseWithBucket):
self.create_object(key_name)
self.addCleanup(self.delete_object, key_name, self.bucket_name)
parsed = self.client.list_objects(Bucket=self.bucket_name)
- self.assertEqual(len(parsed['Contents']), 1)
- self.assertEqual(parsed['Contents'][0]['Key'], key_name)
+ assert len(parsed['Contents']) == 1
+ assert parsed['Contents'][0]['Key'] == key_name
parsed = self.client.list_objects(Bucket=self.bucket_name,
EncodingType='url')
- self.assertEqual(len(parsed['Contents']), 1)
- self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08')
+ assert len(parsed['Contents']) == 1
+ assert parsed['Contents'][0]['Key'] == 'foo%08'
def test_unicode_system_character_with_list_v2(self):
# Verify we can use a unicode system character which would normally
@@ -500,13 +497,13 @@ class TestS3Objects(TestS3BaseWithBucket):
self.create_object(key_name)
self.addCleanup(self.delete_object, key_name, self.bucket_name)
parsed = self.client.list_objects_v2(Bucket=self.bucket_name)
- self.assertEqual(len(parsed['Contents']), 1)
- self.assertEqual(parsed['Contents'][0]['Key'], key_name)
+ assert len(parsed['Contents']) == 1
+ assert parsed['Contents'][0]['Key'] == key_name
parsed = self.client.list_objects_v2(Bucket=self.bucket_name,
EncodingType='url')
- self.assertEqual(len(parsed['Contents']), 1)
- self.assertEqual(parsed['Contents'][0]['Key'], 'foo%08')
+ assert len(parsed['Contents']) == 1
+ assert parsed['Contents'][0]['Key'] == 'foo%08'
def test_unicode_system_character_with_list_object_versions(self):
# Verify we can use a unicode system character which would normally
@@ -515,13 +512,13 @@ class TestS3Objects(TestS3BaseWithBucket):
self.create_object(key_name)
self.addCleanup(self.delete_object, key_name, self.bucket_name)
parsed = self.client.list_object_versions(Bucket=self.bucket_name)
- self.assertEqual(len(parsed['Versions']), 1)
- self.assertEqual(parsed['Versions'][0]['Key'], key_name)
+ assert len(parsed['Versions']) == 1
+ assert parsed['Versions'][0]['Key'] == key_name
parsed = self.client.list_object_versions(Bucket=self.bucket_name,
EncodingType='url')
- self.assertEqual(len(parsed['Versions']), 1)
- self.assertEqual(parsed['Versions'][0]['Key'], 'foo%03')
+ assert len(parsed['Versions']) == 1
+ assert parsed['Versions'][0]['Key'] == 'foo%03'
def test_thread_safe_auth(self):
self.auth_paths = []
@@ -543,21 +540,18 @@ class TestS3Objects(TestS3BaseWithBucket):
thread.start()
for thread in threads:
thread.join()
- self.assertEqual(
- self.caught_exceptions, [],
- "Unexpectedly caught exceptions: %s" % self.caught_exceptions)
- self.assertEqual(
- len(set(self.auth_paths)), 10,
- "Expected 10 unique auth paths, instead received: %s" %
- (self.auth_paths))
+ msg = "Unexpectedly caught exceptions: %s" % self.caught_exceptions
+ assert self.caught_exceptions == [], msg
+ expected = "Expected 10 unique auth paths, instead received: %s" % (self.auth_paths)
+ assert len(set(self.auth_paths)) == 10, expected
def test_non_normalized_key_paths(self):
# The create_object method has assertEqual checks for 200 status.
self.create_object('key./././name')
bucket_contents = self.client.list_objects(
Bucket=self.bucket_name)['Contents']
- self.assertEqual(len(bucket_contents), 1)
- self.assertEqual(bucket_contents[0]['Key'], 'key./././name')
+ assert len(bucket_contents) == 1
+ assert bucket_contents[0]['Key'] == 'key./././name'
class TestS3Regions(BaseS3ClientTest):
@@ -581,7 +575,7 @@ class TestS3Regions(BaseS3ClientTest):
data = self.client.get_object(
Bucket=bucket_name, Key='foo')
- self.assertEqual(data['Body'].read(), b'foo' * 1024)
+ assert data['Body'].read() == b'foo' * 1024
class TestS3Copy(TestS3BaseWithBucket):
@@ -598,7 +592,7 @@ class TestS3Copy(TestS3BaseWithBucket):
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
- self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
+ assert data['Body'].read().decode('utf-8') == 'foo'
def test_copy_with_query_string(self):
key_name = 'a+b/foo?notVersionid=bar'
@@ -612,7 +606,7 @@ class TestS3Copy(TestS3BaseWithBucket):
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
- self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
+ assert data['Body'].read().decode('utf-8') == 'foo'
def test_can_copy_with_dict_form(self):
key_name = 'a+b/foo?versionId=abcd'
@@ -627,7 +621,7 @@ class TestS3Copy(TestS3BaseWithBucket):
# Now verify we can retrieve the copied object.
data = self.client.get_object(
Bucket=self.bucket_name, Key=key_name2)
- self.assertEqual(data['Body'].read().decode('utf-8'), 'foo')
+ assert data['Body'].read().decode('utf-8') == 'foo'
def test_copy_with_s3_metadata(self):
key_name = 'foo.txt'
@@ -669,7 +663,7 @@ class TestS3PresignUsStandard(BaseS3PresignTest):
"Host was suppose to use DNS style, instead "
"got: %s" % presigned_url)
# Try to retrieve the object using the presigned url.
- self.assertEqual(http_get(presigned_url).data, b'foo')
+ assert http_get(presigned_url).data == b'foo'
def test_presign_with_existing_query_string_values(self):
content_disposition = 'attachment; filename=foo.txt;'
@@ -678,9 +672,8 @@ class TestS3PresignUsStandard(BaseS3PresignTest):
'Bucket': self.bucket_name, 'Key': self.key,
'ResponseContentDisposition': content_disposition})
response = http_get(presigned_url)
- self.assertEqual(response.headers['Content-Disposition'],
- content_disposition)
- self.assertEqual(response.data, b'foo')
+ assert response.headers['Content-Disposition'] == content_disposition
+ assert response.data == b'foo'
def test_presign_sigv4(self):
self.client_config.signature_version = 's3v4'
@@ -688,14 +681,12 @@ class TestS3PresignUsStandard(BaseS3PresignTest):
's3', config=self.client_config)
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
- self.assertTrue(
- presigned_url.startswith(
+ msg = "Host was suppose to be the us-east-1 endpoint, instead got: %s" % presigned_url
+ assert presigned_url.startswith(
'https://%s.s3.amazonaws.com/%s' % (
- self.bucket_name, self.key)),
- "Host was suppose to be the us-east-1 endpoint, instead "
- "got: %s" % presigned_url)
+ self.bucket_name, self.key)), msg
# Try to retrieve the object using the presigned url.
- self.assertEqual(http_get(presigned_url).data, b'foo')
+ assert http_get(presigned_url).data == b'foo'
def test_presign_post_sigv2(self):
@@ -718,16 +709,15 @@ class TestS3PresignUsStandard(BaseS3PresignTest):
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
- self.assertTrue(
- post_args['url'].startswith(
- 'https://%s.s3.amazonaws.com' % self.bucket_name),
- "Host was suppose to use DNS style, instead "
- "got: %s" % post_args['url'])
+ msg = "Host was suppose to use DNS style, instead got: %s" % post_args['url']
+ assert post_args['url'].startswith(
+ 'https://%s.s3.amazonaws.com' % self.bucket_name), msg
+
# Try to retrieve the object using the presigned url.
r = http_post(post_args['url'], data=post_args['fields'],
files=files)
- self.assertEqual(r.status, 204)
+ assert r.status == 204
def test_presign_post_sigv4(self):
self.client_config.signature_version = 's3v4'
@@ -753,15 +743,14 @@ class TestS3PresignUsStandard(BaseS3PresignTest):
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
- self.assertTrue(
- post_args['url'].startswith(
- 'https://%s.s3.amazonaws.com/' % self.bucket_name),
- "Host was suppose to use us-east-1 endpoint, instead "
- "got: %s" % post_args['url'])
+ msg = "Host was suppose to use us-east-1 endpoint, instead got: %s" % post_args['url']
+ assert post_args['url'].startswith(
+ 'https://%s.s3.amazonaws.com/' % self.bucket_name), msg
+
r = http_post(post_args['url'], data=post_args['fields'],
files=files)
- self.assertEqual(r.status, 204)
+ assert r.status == 204
class TestS3PresignNonUsStandard(BaseS3PresignTest):
@@ -777,14 +766,12 @@ class TestS3PresignNonUsStandard(BaseS3PresignTest):
def test_presign_sigv2(self):
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
- self.assertTrue(
- presigned_url.startswith(
+ msg = "Host was suppose to use DNS style, instead got: %s" % presigned_url
+ assert presigned_url.startswith(
'https://%s.s3.amazonaws.com/%s' % (
- self.bucket_name, self.key)),
- "Host was suppose to use DNS style, instead "
- "got: %s" % presigned_url)
+ self.bucket_name, self.key)), msg
# Try to retrieve the object using the presigned url.
- self.assertEqual(http_get(presigned_url).data, b'foo')
+ assert http_get(presigned_url).data == b'foo'
def test_presign_sigv4(self):
# For a newly created bucket, you can't use virtualhosted
@@ -800,14 +787,12 @@ class TestS3PresignNonUsStandard(BaseS3PresignTest):
presigned_url = self.client.generate_presigned_url(
'get_object', Params={'Bucket': self.bucket_name, 'Key': self.key})
- self.assertTrue(
- presigned_url.startswith(
+ msg = "Host was suppose to be the us-west-2 endpoint, instead got: %s" % presigned_url
+ assert presigned_url.startswith(
'https://s3.us-west-2.amazonaws.com/%s/%s' % (
- self.bucket_name, self.key)),
- "Host was suppose to be the us-west-2 endpoint, instead "
- "got: %s" % presigned_url)
+ self.bucket_name, self.key)), msg
# Try to retrieve the object using the presigned url.
- self.assertEqual(http_get(presigned_url).data, b'foo')
+ assert http_get(presigned_url).data == b'foo'
def test_presign_post_sigv2(self):
# Create some of the various supported conditions.
@@ -828,15 +813,13 @@ class TestS3PresignNonUsStandard(BaseS3PresignTest):
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
- self.assertTrue(
- post_args['url'].startswith(
- 'https://%s.s3.amazonaws.com' % self.bucket_name),
- "Host was suppose to use DNS style, instead "
- "got: %s" % post_args['url'])
+ msg = "Host was suppose to use DNS style, instead got: %s" % post_args['url']
+ assert post_args['url'].startswith(
+ 'https://%s.s3.amazonaws.com' % self.bucket_name), msg
r = http_post(post_args['url'], data=post_args['fields'],
files=files)
- self.assertEqual(r.status, 204)
+ assert r.status == 204
def test_presign_post_sigv4(self):
self.client_config.signature_version = 's3v4'
@@ -861,15 +844,14 @@ class TestS3PresignNonUsStandard(BaseS3PresignTest):
files = {'file': ('baz', 'some data')}
# Make sure the correct endpoint is being used
- self.assertTrue(
- post_args['url'].startswith(
- 'https://%s.s3.amazonaws.com/' % self.bucket_name),
- "Host was suppose to use DNS style, instead "
- "got: %s" % post_args['url'])
+ msg = "Host was suppose to use DNS style, instead got: %s" % post_args['url']
+ assert post_args['url'].startswith(
+ 'https://%s.s3.amazonaws.com/' % self.bucket_name), msg
+
r = http_post(post_args['url'], data=post_args['fields'],
files=files)
- self.assertEqual(r.status, 204)
+ assert r.status == 204
class TestCreateBucketInOtherRegion(TestS3BaseWithBucket):
@@ -917,7 +899,7 @@ class TestS3SigV4Client(BaseS3ClientTest):
# what happens once DNS propogates which is arguably more interesting,
# as DNS will point us to the eu-central-1 endpoint.
response = client.get_bucket_location(Bucket=self.bucket_name)
- self.assertEqual(response['LocationConstraint'], 'us-west-2')
+ assert response['LocationConstraint'] == 'us-west-2'
def test_request_retried_for_sigv4(self):
body = six.BytesIO(b"Hello world!")
@@ -950,7 +932,7 @@ class TestS3SigV4Client(BaseS3ClientTest):
for content in response['Contents']:
key_refs.append(content['Key'])
- self.assertEqual(key_names, key_refs)
+ assert key_names == key_refs
@pytest.mark.slow
def test_paginate_list_objects_safe_chars(self):
@@ -973,7 +955,7 @@ class TestS3SigV4Client(BaseS3ClientTest):
for content in response['Contents']:
key_refs.append(content['Key'])
- self.assertEqual(key_names, key_refs)
+ assert key_names == key_refs
def test_create_multipart_upload(self):
key = 'mymultipartupload'
@@ -992,9 +974,9 @@ class TestS3SigV4Client(BaseS3ClientTest):
)
# Make sure there is only one multipart upload.
- self.assertEqual(len(response['Uploads']), 1)
+ assert len(response['Uploads']) == 1
# Make sure the upload id is as expected.
- self.assertEqual(response['Uploads'][0]['UploadId'], upload_id)
+ assert response['Uploads'][0]['UploadId'] == upload_id
def test_can_add_double_space_metadata(self):
# Ensure we get no sigv4 errors when we send
@@ -1022,12 +1004,12 @@ class TestS3SigV4Client(BaseS3ClientTest):
aws_secret_access_key=creds.secret_key,
aws_session_token='bad-token-causes-400',
)
- with self.assertRaises(ClientError) as e:
+ with pytest.raises(ClientError) as e:
client.head_object(
Bucket=self.bucket_name,
Key='foo.txt',
)
- self.assertEqual(e.exception.response['Error']['Code'], '400')
+ assert e.value.response['Error']['Code'] == '400'
class TestSSEKeyParamValidation(BaseS3ClientTest):
@@ -1053,18 +1035,16 @@ class TestSSEKeyParamValidation(BaseS3ClientTest):
self.addCleanup(self.client.delete_object,
Bucket=self.bucket_name, Key='foo2.txt')
- self.assertEqual(
- self.client.get_object(Bucket=self.bucket_name,
+ assert self.client.get_object(Bucket=self.bucket_name,
Key='foo.txt',
SSECustomerAlgorithm='AES256',
- SSECustomerKey=key_bytes)['Body'].read(),
- b'mycontents')
- self.assertEqual(
- self.client.get_object(Bucket=self.bucket_name,
+ SSECustomerKey=key_bytes
+ )['Body'].read() == b'mycontents'
+ assert self.client.get_object(Bucket=self.bucket_name,
Key='foo2.txt',
SSECustomerAlgorithm='AES256',
- SSECustomerKey=key_str)['Body'].read(),
- b'mycontents2')
+ SSECustomerKey=key_str
+ )['Body'].read() == b'mycontents2'
def test_make_request_with_sse_copy_source(self):
encrypt_key = 'a' * 32
@@ -1093,12 +1073,11 @@ class TestSSEKeyParamValidation(BaseS3ClientTest):
# Download the object using the new encryption key.
# The content should not have changed.
- self.assertEqual(
- self.client.get_object(
+ assert self.client.get_object(
Bucket=self.bucket_name, Key='bar.txt',
SSECustomerAlgorithm='AES256',
- SSECustomerKey=other_encrypt_key)['Body'].read(),
- b'mycontents')
+ SSECustomerKey=other_encrypt_key
+ )['Body'].read() == b'mycontents'
class TestS3UTF8Headers(BaseS3ClientTest):
@@ -1181,13 +1160,12 @@ class TestAutoS3Addressing(BaseS3ClientTest):
def test_can_list_buckets(self):
response = self.client.list_buckets()
- self.assertIn('Buckets', response)
+ assert 'Buckets' in response
def test_can_make_bucket_and_put_object(self):
response = self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body='contents')
- self.assertEqual(
- response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
def test_can_make_bucket_and_put_object_with_sigv4(self):
self.region = 'eu-central-1'
@@ -1195,8 +1173,7 @@ class TestAutoS3Addressing(BaseS3ClientTest):
bucket_name = self.create_bucket(self.region)
response = self.client.put_object(
Bucket=bucket_name, Key='foo', Body='contents')
- self.assertEqual(
- response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
class TestS3VirtualAddressing(TestAutoS3Addressing):
@@ -1231,8 +1208,7 @@ class TestRegionRedirect(BaseS3ClientTest):
def test_region_redirects(self):
try:
response = self.client.list_objects(Bucket=self.bucket_name)
- self.assertEqual(
- response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
except ClientError as e:
error = e.response['Error'].get('Code', None)
if error == 'PermanentRedirect':
@@ -1246,17 +1222,15 @@ class TestRegionRedirect(BaseS3ClientTest):
eu_bucket = self.create_bucket(self.bucket_region)
msg = 'The authorization mechanism you have provided is not supported.'
- with six.assertRaisesRegex(self, ClientError, msg):
+ with pytest.raises(ClientError, match=msg):
sigv2_client.list_objects(Bucket=eu_bucket)
def test_region_redirects_multiple_requests(self):
try:
response = self.client.list_objects(Bucket=self.bucket_name)
- self.assertEqual(
- response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert response['ResponseMetadata']['HTTPStatusCode'] == 200
second_response = self.client.list_objects(Bucket=self.bucket_name)
- self.assertEqual(
- second_response['ResponseMetadata']['HTTPStatusCode'], 200)
+ assert second_response['ResponseMetadata']['HTTPStatusCode'] == 200
except ClientError as e:
error = e.response['Error'].get('Code', None)
if error == 'PermanentRedirect':
@@ -1266,7 +1240,7 @@ class TestRegionRedirect(BaseS3ClientTest):
response = self.client.head_bucket(Bucket=self.bucket_name)
headers = response['ResponseMetadata']['HTTPHeaders']
region = headers.get('x-amz-bucket-region')
- self.assertEqual(region, self.bucket_region)
+ assert region == self.bucket_region
def test_redirects_head_object(self):
key = 'foo'
@@ -1276,7 +1250,7 @@ class TestRegionRedirect(BaseS3ClientTest):
try:
response = self.client.head_object(
Bucket=self.bucket_name, Key=key)
- self.assertEqual(response.get('ContentLength'), len(key))
+ assert response.get('ContentLength') == len(key)
except ClientError as e:
self.fail("S3 Client failed to redirect Head Object: %s" % e)
@@ -1315,7 +1289,7 @@ class TestBucketWithVersions(BaseS3ClientTest):
self.wait_until_key_exists(bucket, key)
response = self.client.get_object(Bucket=bucket, Key=key)
- self.assertEqual(response['Body'].read(), body)
+ assert response['Body'].read() == body
response = self.client.delete_object(Bucket=bucket, Key=key)
# This cleanup step removes the DeleteMarker that's created
@@ -1327,8 +1301,8 @@ class TestBucketWithVersions(BaseS3ClientTest):
VersionId=response['VersionId']
)
# Object does not exist anymore.
- with self.assertRaises(ClientError):
+ with pytest.raises(ClientError):
self.client.get_object(Bucket=bucket, Key=key)
versions = self.client.list_object_versions(Bucket=bucket)
version_ids = self.extract_version_ids(versions)
- self.assertEqual(len(version_ids), 2)
+ assert len(version_ids) == 2
diff --git a/tests/integration/test_session.py b/tests/integration/test_session.py
index 6f5fa21f..5529bf2f 100644
--- a/tests/integration/test_session.py
+++ b/tests/integration/test_session.py
@@ -29,19 +29,19 @@ class TestCanChangeParsing(unittest.TestCase):
s3 = self.session.create_client('s3', 'us-west-2')
parsed = s3.list_buckets()
dates = [bucket['CreationDate'] for bucket in parsed['Buckets']]
- self.assertTrue(all(isinstance(date, str) for date in dates),
- "Expected all str types but instead got: %s" % dates)
+ msg = "Expected all str types but instead got: %s" % dates
+ assert all(isinstance(date, str) for date in dates), msg
def test_maps_service_name_when_overriden(self):
ses = self.session.get_service_model('ses')
- self.assertEqual(ses.endpoint_prefix, 'email')
+ assert ses.endpoint_prefix == 'email'
# But we should map the service_name to be the same name
# used when calling get_service_model which is different
# than the endpoint_prefix.
- self.assertEqual(ses.service_name, 'ses')
+ assert ses.service_name == 'ses'
def test_maps_service_name_from_client(self):
# Same thing as test_maps_service_name_from_client,
# except through the client interface.
client = self.session.create_client('ses', region_name='us-east-1')
- self.assertEqual(client.meta.service_model.service_name, 'ses')
+ assert client.meta.service_model.service_name == 'ses'
diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py
index d6c6aeee..55cb6ac2 100644
--- a/tests/integration/test_smoke.py
+++ b/tests/integration/test_smoke.py
@@ -14,6 +14,7 @@ import os
from pprint import pformat
import warnings
import logging
+import pytest
from tests import ClientHTTPStubber
from botocore import xform_name
@@ -243,35 +244,35 @@ def _list_services(dict_entries):
return [key for key in dict_entries if key in wanted_services]
-def test_can_make_request_with_client():
+@pytest.mark.parametrize("service_name", _list_services(SMOKE_TESTS))
+def test_can_make_request_with_client(service_name):
# Same as test_can_make_request, but with Client objects
# instead of service/operations.
session = botocore.session.get_session()
- for service_name in _list_services(SMOKE_TESTS):
- client = _get_client(session, service_name)
- for operation_name in SMOKE_TESTS[service_name]:
- kwargs = SMOKE_TESTS[service_name][operation_name]
- method_name = xform_name(operation_name)
- yield _make_client_call, client, method_name, kwargs
+ client = _get_client(session, service_name)
+ for operation_name in SMOKE_TESTS[service_name]:
+ kwargs = SMOKE_TESTS[service_name][operation_name]
+ method_name = xform_name(operation_name)
+ _make_client_call(client, method_name, kwargs)
def _make_client_call(client, operation_name, kwargs):
method = getattr(client, operation_name)
with warnings.catch_warnings(record=True) as caught_warnings:
response = method(**kwargs)
- assert len(caught_warnings) == 0, \
- "Warnings were emitted during smoke test: %s" % caught_warnings
+ msg = "Warnings were emitted during smoke test: %s" % caught_warnings
+ assert len(caught_warnings) == 0, msg
assert 'Errors' not in response
-def test_can_make_request_and_understand_errors_with_client():
+@pytest.mark.parametrize("service_name", _list_services(ERROR_TESTS))
+def test_can_make_request_and_understand_errors_with_client(service_name):
session = botocore.session.get_session()
- for service_name in _list_services(ERROR_TESTS):
- client = _get_client(session, service_name)
- for operation_name in ERROR_TESTS[service_name]:
- kwargs = ERROR_TESTS[service_name][operation_name]
- method_name = xform_name(operation_name)
- _make_error_client_call(client, method_name, kwargs)
+ client = _get_client(session, service_name)
+ for operation_name in ERROR_TESTS[service_name]:
+ kwargs = ERROR_TESTS[service_name][operation_name]
+ method_name = xform_name(operation_name)
+ _make_error_client_call(client, method_name, kwargs)
def _make_error_client_call(client, operation_name, kwargs):
@@ -285,14 +286,13 @@ def _make_error_client_call(client, operation_name, kwargs):
"for %s.%s" % (client, operation_name))
-def test_client_can_retry_request_properly():
+@pytest.mark.parametrize("service_name", _list_services(SMOKE_TESTS))
+def test_client_can_retry_request_properly(service_name):
session = botocore.session.get_session()
- for service_name in _list_services(SMOKE_TESTS):
- client = _get_client(session, service_name)
- for operation_name in SMOKE_TESTS[service_name]:
- kwargs = SMOKE_TESTS[service_name][operation_name]
- yield (_make_client_call_with_errors, client,
- operation_name, kwargs)
+ client = _get_client(session, service_name)
+ for operation_name in SMOKE_TESTS[service_name]:
+ kwargs = SMOKE_TESTS[service_name][operation_name]
+ _make_client_call_with_errors(client, operation_name, kwargs)
def _make_client_call_with_errors(client, operation_name, kwargs):
diff --git a/tests/integration/test_sts.py b/tests/integration/test_sts.py
index 33e3d0a2..b8ed45bc 100644
--- a/tests/integration/test_sts.py
+++ b/tests/integration/test_sts.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
import botocore.session
@@ -28,7 +29,7 @@ class TestSTS(unittest.TestCase):
sts = self.session.create_client('sts', region_name='ap-southeast-1')
response = sts.get_session_token()
# Do not want to be revealing any temporary keys if the assertion fails
- self.assertIn('Credentials', response.keys())
+ assert 'Credentials' in response.keys()
# Since we have to activate STS regionalization, we will test
# that you can send an STS request to a regionalized endpoint
@@ -36,9 +37,8 @@ class TestSTS(unittest.TestCase):
sts = self.session.create_client(
'sts', region_name='ap-southeast-1',
endpoint_url='https://sts.us-west-2.amazonaws.com')
- self.assertEqual(sts.meta.region_name, 'ap-southeast-1')
- self.assertEqual(sts.meta.endpoint_url,
- 'https://sts.us-west-2.amazonaws.com')
+ assert sts.meta.region_name == 'ap-southeast-1'
+ assert sts.meta.endpoint_url == 'https://sts.us-west-2.amazonaws.com'
# Signing error will be thrown with the incorrect region name included.
- with six.assertRaisesRegex(self, ClientError, 'ap-southeast-1'):
+ with pytest.raises(ClientError, match='ap-southeast-1'):
sts.get_session_token()
diff --git a/tests/integration/test_utils.py b/tests/integration/test_utils.py
index c0f47ffd..682a35c3 100644
--- a/tests/integration/test_utils.py
+++ b/tests/integration/test_utils.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import botocore.session
from botocore.utils import ArgumentGenerator
+import pytest
class ArgumentGeneratorError(AssertionError):
@@ -23,7 +24,7 @@ class ArgumentGeneratorError(AssertionError):
super(AssertionError, self).__init__(full_msg)
-def test_can_generate_all_inputs():
+def _generate_all_inputs():
session = botocore.session.get_session()
generator = ArgumentGenerator()
for service_name in session.get_available_services():
@@ -31,9 +32,7 @@ def test_can_generate_all_inputs():
for operation_name in service_model.operation_names:
operation_model = service_model.operation_model(operation_name)
input_shape = operation_model.input_shape
- if input_shape is not None and input_shape.members:
- yield (_test_can_generate_skeleton, generator,
- input_shape, service_name, operation_name)
+ yield (generator, input_shape, service_name, operation_name)
def _test_can_generate_skeleton(generator, shape, service_name,
@@ -52,3 +51,10 @@ def _test_can_generate_skeleton(generator, shape, service_name,
raise ArgumentGeneratorError(
service_name, operation_name,
generated, "generated arguments were empty")
+
+
+@pytest.mark.parametrize("generator,input_shape,service_name,operation_name", _generate_all_inputs())
+def test_can_generate_all_inputs(generator, input_shape, service_name, operation_name):
+ if input_shape is not None and input_shape.members:
+ _test_can_generate_skeleton(generator,
+ input_shape, service_name, operation_name)
diff --git a/tests/integration/test_waiters.py b/tests/integration/test_waiters.py
index 99cef057..d9c8af28 100644
--- a/tests/integration/test_waiters.py
+++ b/tests/integration/test_waiters.py
@@ -38,7 +38,7 @@ class TestWaiterForDynamoDB(unittest.TestCase):
waiter = self.client.get_waiter('table_exists')
waiter.wait(TableName=table_name)
parsed = self.client.describe_table(TableName=table_name)
- self.assertEqual(parsed['Table']['TableStatus'], 'ACTIVE')
+ assert parsed['Table']['TableStatus'] == 'ACTIVE'
class TestCanGetWaitersThroughClientInterface(unittest.TestCase):
@@ -50,7 +50,7 @@ class TestCanGetWaitersThroughClientInterface(unittest.TestCase):
client = session.create_client('ses', 'us-east-1')
# If we have at least one waiter in the list, we know that we have
# actually loaded the waiters and this test has passed.
- self.assertTrue(len(client.waiter_names) > 0)
+ assert len(client.waiter_names) > 0
class TestMatchersWithErrors(unittest.TestCase):
@@ -63,5 +63,5 @@ class TestMatchersWithErrors(unittest.TestCase):
"""Test that InstanceExists can handle a nonexistent instance."""
waiter = self.client.get_waiter('instance_exists')
waiter.config.max_attempts = 1
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait(InstanceIds=['i-12345'])
--
2.29.2
From dbb3ed50c21e6e58ad2ddde1d5637ff35a60e53c Mon Sep 17 00:00:00 2001
From: Zidaan Dutta <ziddutta@amazon.com>
Date: Mon, 19 Oct 2020 19:25:24 -0400
Subject: [PATCH 08/14] pytest migration of botocore unit tests part1
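The nose-style generator tests are rewritten as pytest parametrized tests,
and the unittest assert* helpers become plain asserts, with
assertRaises/assertRaisesRegex mapped to pytest.raises(..., match=...).
As a rough illustration of the conversion pattern applied throughout
(a simplified sketch, not verbatim from the test suite; SERVICES and
check_service stand in for the real per-service tables and helpers):

    # before: nose collects each yielded callable as a separate test
    def test_services():
        for service_name in SERVICES:
            yield check_service, service_name

    # after: pytest enumerates the cases up front via parametrize
    import pytest

    @pytest.mark.parametrize("service_name", SERVICES)
    def test_services(service_name):
        check_service(service_name)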
---
tests/unit/__init__.py | 6 +-
tests/unit/auth/test_signers.py | 300 ++++++++----------
tests/unit/docs/__init__.py | 8 +-
tests/unit/docs/test_docs.py | 10 +-
tests/unit/docs/test_docstring.py | 15 +-
tests/unit/docs/test_example.py | 12 +-
tests/unit/docs/test_method.py | 7 +-
tests/unit/docs/test_params.py | 12 +-
tests/unit/docs/test_service.py | 7 +-
tests/unit/docs/test_utils.py | 54 ++--
.../response_parsing/test_response_parsing.py | 112 +++----
tests/unit/retries/test_adaptive.py | 8 +-
tests/unit/retries/test_bucket.py | 29 +-
tests/unit/retries/test_quota.py | 26 +-
tests/unit/retries/test_standard.py | 155 ++++-----
tests/unit/retries/test_throttling.py | 32 +-
16 files changed, 352 insertions(+), 441 deletions(-)
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py
index 518b11dc..c471a98a 100644
--- a/tests/unit/__init__.py
+++ b/tests/unit/__init__.py
@@ -30,12 +30,12 @@ class BaseResponseTest(unittest.TestCase):
actual_metadata = actual.pop('ResponseMetadata', {})
expected_metadata = expected.pop('ResponseMetadata', {})
- self.assertEqual(actual, expected)
+ assert actual == expected
self.assert_dict_is_proper_subset(actual_metadata, expected_metadata)
def assert_dict_is_proper_subset(self, superset, subset):
"""
Asserts that a dictionary is a proper subset of another.
"""
- self.assertTrue(all((k in superset and superset[k] == v)
- for k, v in subset.items()))
+ assert all((k in superset and superset[k] == v)
+ for k, v in subset.items())
diff --git a/tests/unit/auth/test_signers.py b/tests/unit/auth/test_signers.py
index 77c41978..f19ec3c8 100644
--- a/tests/unit/auth/test_signers.py
+++ b/tests/unit/auth/test_signers.py
@@ -69,9 +69,9 @@ class TestHMACV1(unittest.TestCase):
"Thu, 17 Nov 2005 18:49:58 GMT\nx-amz-magic:abracadabra\n"
"x-amz-meta-author:foo@bar.com\n/quotes/nelson")
expected_signature = 'jZNOcbfWmD/A/f3hSvVzXZjM2HU='
- self.assertEqual(cs, expected_canonical)
+ assert cs == expected_canonical
sig = self.hmacv1.get_signature('PUT', split, http_headers)
- self.assertEqual(sig, expected_signature)
+ assert sig == expected_signature
def test_duplicate_headers(self):
pairs = [('Date', 'Thu, 17 Nov 2005 18:49:58 GMT'),
@@ -91,7 +91,7 @@ class TestHMACV1(unittest.TestCase):
pairs = [('Date', 'Thu, 17 Nov 2005 18:49:58 GMT')]
sig = self.hmacv1.get_signature('PUT', split,
HTTPHeaders.from_pairs(pairs))
- self.assertEqual(sig, 'P7pBz3Z4p3GxysRSJ/gR8nk7D4o=')
+ assert sig == 'P7pBz3Z4p3GxysRSJ/gR8nk7D4o='
def test_bucket_operations(self):
# Check that the standard operations on buckets that are
@@ -103,7 +103,7 @@ class TestHMACV1(unittest.TestCase):
url = '/quotes?%s' % operation
split = urlsplit(url)
cr = self.hmacv1.canonical_resource(split)
- self.assertEqual(cr, '/quotes?%s' % operation)
+ assert cr == '/quotes?%s' % operation
def test_sign_with_token(self):
credentials = botocore.credentials.Credentials(
@@ -115,10 +115,10 @@ class TestHMACV1(unittest.TestCase):
request.method = 'PUT'
request.url = 'https://s3.amazonaws.com/bucket/key'
auth.add_auth(request)
- self.assertIn('Authorization', request.headers)
+ assert 'Authorization' in request.headers
# We're not actually checking the signature here, we're
# just making sure the auth header has the right format.
- self.assertTrue(request.headers['Authorization'].startswith('AWS '))
+ assert request.headers['Authorization'].startswith('AWS ')
def test_resign_with_token(self):
credentials = botocore.credentials.Credentials(
@@ -138,8 +138,7 @@ class TestHMACV1(unittest.TestCase):
# another unit test that verifies we use the latest time
# when we sign the request.
auth.add_auth(request)
- self.assertEqual(request.headers.get_all('Authorization'),
- [original_auth])
+ assert request.headers.get_all('Authorization') == [original_auth]
def test_resign_uses_most_recent_date(self):
dates = [
@@ -161,8 +160,8 @@ class TestHMACV1(unittest.TestCase):
# Each time we sign a request, we make another call to formatdate()
# so we should have a different date header each time.
- self.assertEqual(original_date, dates[0])
- self.assertEqual(modified_date, dates[1])
+ assert original_date == dates[0]
+ assert modified_date == dates[1]
class TestSigV2(unittest.TestCase):
@@ -192,9 +191,8 @@ class TestSigV2(unittest.TestCase):
request.method = 'POST'
params = {'Foo': u'\u2713'}
result = self.signer.calc_signature(request, params)
- self.assertEqual(
- result, ('Foo=%E2%9C%93',
- u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q='))
+ assert result == ('Foo=%E2%9C%93',
+ u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q=')
def test_fields(self):
request = AWSRequest()
@@ -202,13 +200,12 @@ class TestSigV2(unittest.TestCase):
request.method = 'POST'
request.data = {'Foo': u'\u2713'}
self.signer.add_auth(request)
- self.assertEqual(request.data['AWSAccessKeyId'], 'foo')
- self.assertEqual(request.data['Foo'], u'\u2713')
- self.assertEqual(request.data['Timestamp'], '2014-06-20T08:40:23Z')
- self.assertEqual(request.data['Signature'],
- u'Tiecw+t51tok4dTT8B4bg47zxHEM/KcD55f2/x6K22o=')
- self.assertEqual(request.data['SignatureMethod'], 'HmacSHA256')
- self.assertEqual(request.data['SignatureVersion'], '2')
+ assert request.data['AWSAccessKeyId'] == 'foo'
+ assert request.data['Foo'] == u'\u2713'
+ assert request.data['Timestamp'] == '2014-06-20T08:40:23Z'
+ assert request.data['Signature'] == u'Tiecw+t51tok4dTT8B4bg47zxHEM/KcD55f2/x6K22o='
+ assert request.data['SignatureMethod'] == 'HmacSHA256'
+ assert request.data['SignatureVersion'] == '2'
def test_resign(self):
# Make sure that resigning after e.g. retries works
@@ -220,9 +217,8 @@ class TestSigV2(unittest.TestCase):
'Signature': u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q='
}
result = self.signer.calc_signature(request, params)
- self.assertEqual(
- result, ('Foo=%E2%9C%93',
- u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q='))
+ assert result == ('Foo=%E2%9C%93',
+ u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q=')
def test_get(self):
request = AWSRequest()
@@ -230,13 +226,13 @@ class TestSigV2(unittest.TestCase):
request.method = 'GET'
request.params = {'Foo': u'\u2713'}
self.signer.add_auth(request)
- self.assertEqual(request.params['AWSAccessKeyId'], 'foo')
- self.assertEqual(request.params['Foo'], u'\u2713')
- self.assertEqual(request.params['Timestamp'], '2014-06-20T08:40:23Z')
- self.assertEqual(request.params['Signature'],
- u'Un97klqZCONP65bA1+Iv4H3AcB2I40I4DBvw5ZERFPw=')
- self.assertEqual(request.params['SignatureMethod'], 'HmacSHA256')
- self.assertEqual(request.params['SignatureVersion'], '2')
+ assert request.params['AWSAccessKeyId'] == 'foo'
+ assert request.params['Foo'] == u'\u2713'
+ assert request.params['Timestamp'] == '2014-06-20T08:40:23Z'
+ signature = u'Un97klqZCONP65bA1+Iv4H3AcB2I40I4DBvw5ZERFPw='
+ assert request.params['Signature'] == signature
+ assert request.params['SignatureMethod'] == 'HmacSHA256'
+ assert request.params['SignatureVersion'] == '2'
class TestSigV3(unittest.TestCase):
@@ -261,10 +257,9 @@ class TestSigV3(unittest.TestCase):
request.headers = {'Date': 'Thu, 17 Nov 2005 18:49:58 GMT'}
request.url = 'https://route53.amazonaws.com'
self.auth.add_auth(request)
- self.assertEqual(
- request.headers['X-Amzn-Authorization'],
- ('AWS3-HTTPS AWSAccessKeyId=access_key,Algorithm=HmacSHA256,'
- 'Signature=M245fo86nVKI8rLpH4HgWs841sBTUKuwciiTpjMDgPs='))
+ assert request.headers['X-Amzn-Authorization'] == (
+ 'AWS3-HTTPS AWSAccessKeyId=access_key,Algorithm=HmacSHA256,'
+ 'Signature=M245fo86nVKI8rLpH4HgWs841sBTUKuwciiTpjMDgPs=')
def test_resign_with_token(self):
credentials = botocore.credentials.Credentials(
@@ -279,8 +274,8 @@ class TestSigV3(unittest.TestCase):
# Resigning the request shouldn't change the authorization
# header.
auth.add_auth(request)
- self.assertEqual(request.headers.get_all('X-Amzn-Authorization'),
- [original_auth])
+ assert request.headers.get_all(
+ 'X-Amzn-Authorization') == [original_auth]
class TestS3SigV4Auth(BaseTestWithFixedDate):
@@ -310,8 +305,8 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
original_auth = self.request.headers['Authorization']
self.auth.add_auth(self.request)
- self.assertEqual(self.request.headers.get_all('Authorization'),
- [original_auth])
+ assert self.request.headers.get_all(
+ 'Authorization') == [original_auth]
def test_signature_is_not_normalized(self):
request = AWSRequest()
@@ -321,8 +316,8 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
'secret_key')
auth = botocore.auth.S3SigV4Auth(credentials, 's3', 'us-east-1')
auth.add_auth(request)
- self.assertTrue(
- request.headers['Authorization'].startswith('AWS4-HMAC-SHA256'))
+ assert request.headers['Authorization'].startswith('AWS4-HMAC-SHA256')
+
def test_query_string_params_in_urls(self):
request = AWSRequest()
@@ -337,7 +332,7 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
# by ensuring that query string paramters that are added to the
# canonical query string are correctly formatted.
cqs = self.auth.canonical_query_string(request)
- self.assertEqual('marker=%C3%A4%C3%B6%C3%BC-01.txt&prefix=', cqs)
+ assert cqs == 'marker=%C3%A4%C3%B6%C3%BC-01.txt&prefix='
def _test_blacklist_header(self, header, value):
request = AWSRequest()
@@ -348,7 +343,7 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
'secret_key')
auth = botocore.auth.S3SigV4Auth(credentials, 's3', 'us-east-1')
auth.add_auth(request)
- self.assertNotIn(header, request.headers['Authorization'])
+ assert header not in request.headers['Authorization']
def test_blacklist_expect_headers(self):
self._test_blacklist_header('expect', '100-continue')
@@ -364,19 +359,19 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
self.client_config.s3['payload_signing_enabled'] = True
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
def test_does_not_use_sha256_if_config_value_is_false(self):
self.client_config.s3['payload_signing_enabled'] = False
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header == 'UNSIGNED-PAYLOAD'
def test_uses_sha256_if_md5_unset(self):
self.request.context['has_streaming_input'] = True
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
def test_uses_sha256_if_not_https(self):
self.request.context['has_streaming_input'] = True
@@ -384,7 +379,7 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
self.request.url = 'http://s3.amazonaws.com/bucket'
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
def test_uses_sha256_if_not_streaming_upload(self):
self.request.context['has_streaming_input'] = False
@@ -392,21 +387,21 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
self.request.url = 'https://s3.amazonaws.com/bucket'
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
def test_does_not_use_sha256_if_md5_set(self):
self.request.context['has_streaming_input'] = True
self.request.headers.add_header('Content-MD5', 'foo')
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header == 'UNSIGNED-PAYLOAD'
def test_does_not_use_sha256_if_context_config_set(self):
self.request.context['payload_signing_enabled'] = False
self.request.headers.add_header('Content-MD5', 'foo')
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header == 'UNSIGNED-PAYLOAD'
def test_sha256_if_context_set_on_http(self):
self.request.context['payload_signing_enabled'] = False
@@ -414,14 +409,14 @@ class TestS3SigV4Auth(BaseTestWithFixedDate):
self.request.url = 'http://s3.amazonaws.com/bucket'
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
def test_sha256_if_context_set_without_md5(self):
self.request.context['payload_signing_enabled'] = False
self.request.url = 'https://s3.amazonaws.com/bucket'
self.auth.add_auth(self.request)
sha_header = self.request.headers['X-Amz-Content-SHA256']
- self.assertNotEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header != 'UNSIGNED-PAYLOAD'
class TestSigV4(unittest.TestCase):
@@ -450,7 +445,7 @@ class TestSigV4(unittest.TestCase):
expected = ("format=sdk&pretty=true&q=George%20Lucas&q.options=%7B%22"
"defaultOperator%22%3A%20%22and%22%2C%20%22fields%22%3A%5B"
"%22directors%5E10%22%5D%7D")
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_thread_safe_timestamp(self):
request = AWSRequest()
@@ -472,9 +467,9 @@ class TestSigV4(unittest.TestCase):
# Go through the add_auth process once. This will attach
# a timestamp to the request at the beginning of auth.
auth.add_auth(request)
- self.assertEqual(request.context['timestamp'], '20140101T000000Z')
+ assert request.context['timestamp'] == '20140101T000000Z'
# Ensure the date is in the Authorization header
- self.assertIn('20140101', request.headers['Authorization'])
+ assert '20140101' in request.headers['Authorization']
# Now suppose the utc time becomes the next day all of a sudden
mock_datetime.utcnow.return_value = datetime.datetime(
2014, 1, 2, 0, 0)
@@ -483,12 +478,12 @@ class TestSigV4(unittest.TestCase):
# body and not what the time is now mocked as. This is to ensure
# there is no mismatching in timestamps when signing.
cr = auth.canonical_request(request)
- self.assertIn('x-amz-date:20140101T000000Z', cr)
- self.assertNotIn('x-amz-date:20140102T000000Z', cr)
+ assert 'x-amz-date:20140101T000000Z' in cr
+ assert 'x-amz-date:20140102T000000Z' not in cr
sts = auth.string_to_sign(request, cr)
- self.assertIn('20140101T000000Z', sts)
- self.assertNotIn('20140102T000000Z', sts)
+ assert '20140101T000000Z' in sts
+ assert '20140102T000000Z' not in sts
def test_payload_is_binary_file(self):
request = AWSRequest()
@@ -496,9 +491,9 @@ class TestSigV4(unittest.TestCase):
request.url = 'https://amazonaws.com'
auth = self.create_signer()
payload = auth.payload(request)
- self.assertEqual(
- payload,
- '1dabba21cdad44541f6b15796f8d22978fc7ea10c46aeceeeeb66c23b3ac7604')
+ expected_payload = ('1dabba21cdad44541f6b15796f8d2297'
+ '8fc7ea10c46aeceeeeb66c23b3ac7604')
+ assert payload == expected_payload
def test_payload_is_bytes_type(self):
request = AWSRequest()
@@ -506,9 +501,9 @@ class TestSigV4(unittest.TestCase):
request.url = 'https://amazonaws.com'
auth = self.create_signer()
payload = auth.payload(request)
- self.assertEqual(
- payload,
- '1dabba21cdad44541f6b15796f8d22978fc7ea10c46aeceeeeb66c23b3ac7604')
+ expected_payload = ('1dabba21cdad44541f6b15796f8d2297'
+ '8fc7ea10c46aeceeeeb66c23b3ac7604')
+ assert payload == expected_payload
def test_payload_not_signed_if_disabled_in_context(self):
request = AWSRequest()
@@ -517,7 +512,7 @@ class TestSigV4(unittest.TestCase):
request.context['payload_signing_enabled'] = False
auth = self.create_signer()
payload = auth.payload(request)
- self.assertEqual(payload, 'UNSIGNED-PAYLOAD')
+ assert payload == 'UNSIGNED-PAYLOAD'
def test_content_sha256_set_if_payload_signing_disabled(self):
request = AWSRequest()
@@ -528,21 +523,21 @@ class TestSigV4(unittest.TestCase):
auth = self.create_signer()
auth.add_auth(request)
sha_header = request.headers['X-Amz-Content-SHA256']
- self.assertEqual(sha_header, 'UNSIGNED-PAYLOAD')
+ assert sha_header == 'UNSIGNED-PAYLOAD'
def test_collapse_multiple_spaces(self):
auth = self.create_signer()
original = HTTPHeaders()
original['foo'] = 'double space'
headers = auth.canonical_headers(original)
- self.assertEqual(headers, 'foo:double space')
+ assert headers == 'foo:double space'
def test_trims_leading_trailing_spaces(self):
auth = self.create_signer()
original = HTTPHeaders()
original['foo'] = ' leading and trailing '
headers = auth.canonical_headers(original)
- self.assertEqual(headers, 'foo:leading and trailing')
+ assert headers == 'foo:leading and trailing'
def test_strips_http_default_port(self):
request = AWSRequest()
@@ -551,7 +546,7 @@ class TestSigV4(unittest.TestCase):
auth = self.create_signer('s3', 'us-west-2')
actual = auth.headers_to_sign(request)['host']
expected = 's3.us-west-2.amazonaws.com'
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_strips_https_default_port(self):
request = AWSRequest()
@@ -560,7 +555,7 @@ class TestSigV4(unittest.TestCase):
auth = self.create_signer('s3', 'us-west-2')
actual = auth.headers_to_sign(request)['host']
expected = 's3.us-west-2.amazonaws.com'
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_strips_http_auth(self):
request = AWSRequest()
@@ -569,7 +564,7 @@ class TestSigV4(unittest.TestCase):
auth = self.create_signer('s3', 'us-west-2')
actual = auth.headers_to_sign(request)['host']
expected = 's3.us-west-2.amazonaws.com'
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_strips_default_port_and_http_auth(self):
request = AWSRequest()
@@ -578,7 +573,7 @@ class TestSigV4(unittest.TestCase):
auth = self.create_signer('s3', 'us-west-2')
actual = auth.headers_to_sign(request)['host']
expected = 's3.us-west-2.amazonaws.com'
- self.assertEqual(actual, expected)
+ assert actual == expected
class TestSigV4Resign(BaseTestWithFixedDate):
@@ -601,16 +596,16 @@ class TestSigV4Resign(BaseTestWithFixedDate):
original_auth = self.request.headers['Authorization']
self.auth.add_auth(self.request)
- self.assertEqual(self.request.headers.get_all('Authorization'),
- [original_auth])
+ assert self.request.headers.get_all(
+ 'Authorization') == [original_auth]
def test_sigv4_without_date(self):
self.auth.add_auth(self.request)
original_auth = self.request.headers['Authorization']
self.auth.add_auth(self.request)
- self.assertEqual(self.request.headers.get_all('Authorization'),
- [original_auth])
+ assert self.request.headers.get_all(
+ 'Authorization') == [original_auth]
class BasePresignTest(unittest.TestCase):
@@ -660,49 +655,45 @@ class TestS3SigV2Presign(BasePresignTest):
query_string = self.get_parsed_query_string(self.request)
# We should have still kept the response-content-disposition
# in the query string.
- self.assertIn('response-content-disposition', query_string)
- self.assertEqual(query_string['response-content-disposition'],
- 'attachment; filename="download.jpg"')
+ assert 'response-content-disposition' in query_string
+ assert query_string[
+ 'response-content-disposition'] == 'attachment; filename="download.jpg"'
# But we should have also added the parts from the signer.
- self.assertEqual(query_string['AWSAccessKeyId'], self.access_key)
+ assert query_string['AWSAccessKeyId'] == self.access_key
def test_presign_no_headers(self):
self.auth.add_auth(self.request)
- self.assertTrue(self.request.url.startswith(self.path + '?'))
+ assert self.request.url.startswith(self.path + '?')
query_string = self.get_parsed_query_string(self.request)
- self.assertEqual(query_string['AWSAccessKeyId'], self.access_key)
- self.assertEqual(query_string['Expires'],
- str(int(self.current_epoch_time) + self.expires))
- self.assertEqual(query_string['Signature'],
- 'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
+ assert query_string['AWSAccessKeyId'] == self.access_key
+ expected_expiry = str(int(self.current_epoch_time) + self.expires)
+ assert query_string['Expires'] == expected_expiry
+ assert query_string['Signature'] == 'ZRSgywstwIruKLTLt/Bcrf9H1K4='
def test_presign_with_x_amz_headers(self):
self.request.headers['x-amz-security-token'] = 'foo'
self.request.headers['x-amz-acl'] = 'read-only'
self.auth.add_auth(self.request)
query_string = self.get_parsed_query_string(self.request)
- self.assertEqual(query_string['x-amz-security-token'], 'foo')
- self.assertEqual(query_string['x-amz-acl'], 'read-only')
- self.assertEqual(query_string['Signature'],
- '5oyMAGiUk1E5Ry2BnFr6cIS3Gus=')
+ assert query_string['x-amz-security-token'] == 'foo'
+ assert query_string['x-amz-acl'] == 'read-only'
+ assert query_string['Signature'] == '5oyMAGiUk1E5Ry2BnFr6cIS3Gus='
def test_presign_with_content_headers(self):
self.request.headers['content-type'] = 'txt'
self.request.headers['content-md5'] = 'foo'
self.auth.add_auth(self.request)
query_string = self.get_parsed_query_string(self.request)
- self.assertEqual(query_string['content-type'], 'txt')
- self.assertEqual(query_string['content-md5'], 'foo')
- self.assertEqual(query_string['Signature'],
- '/YQRFdQGywXP74WrOx2ET/RUqz8=')
+ assert query_string['content-type'] == 'txt'
+ assert query_string['content-md5'] == 'foo'
+ assert query_string['Signature'] == '/YQRFdQGywXP74WrOx2ET/RUqz8='
def test_presign_with_unused_headers(self):
self.request.headers['user-agent'] = 'botocore'
self.auth.add_auth(self.request)
query_string = self.get_parsed_query_string(self.request)
- self.assertNotIn('user-agent', query_string)
- self.assertEqual(query_string['Signature'],
- 'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
+ assert 'user-agent' not in query_string
+ assert query_string['Signature'] == 'ZRSgywstwIruKLTLt/Bcrf9H1K4='
class TestSigV4Presign(BasePresignTest):
@@ -735,16 +726,14 @@ class TestSigV4Presign(BasePresignTest):
request.url = 'https://ec2.us-east-1.amazonaws.com/'
self.auth.add_auth(request)
query_string = self.get_parsed_query_string(request)
- self.assertEqual(
- query_string,
- {'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+ assert query_string == {'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
'X-Amz-Credential': ('access_key/20140101/myregion/'
'myservice/aws4_request'),
'X-Amz-Date': '20140101T000000Z',
'X-Amz-Expires': '60',
'X-Amz-Signature': ('c70e0bcdb4cd3ee324f71c78195445b878'
'8315af0800bbbdbbb6d05a616fb84c'),
- 'X-Amz-SignedHeaders': 'host'})
+ 'X-Amz-SignedHeaders': 'host'}
def test_operation_params_before_auth_params(self):
# The spec is picky about this.
@@ -753,8 +742,7 @@ class TestSigV4Presign(BasePresignTest):
request.url = 'https://ec2.us-east-1.amazonaws.com/?Action=MyOperation'
self.auth.add_auth(request)
# Verify auth params come after the existing params.
- self.assertIn(
- '?Action=MyOperation&X-Amz', request.url)
+ assert '?Action=MyOperation&X-Amz' in request.url
def test_operation_params_before_auth_params_in_body(self):
request = AWSRequest()
@@ -764,8 +752,7 @@ class TestSigV4Presign(BasePresignTest):
self.auth.add_auth(request)
# Same situation, the params from request.data come before the auth
# params in the query string.
- self.assertIn(
- '?Action=MyOperation&X-Amz', request.url)
+ assert '?Action=MyOperation&X-Amz' in request.url
def test_presign_with_spaces_in_param(self):
request = AWSRequest()
@@ -774,7 +761,7 @@ class TestSigV4Presign(BasePresignTest):
request.data = {'Action': 'MyOperation', 'Description': 'With Spaces'}
self.auth.add_auth(request)
# Verify we encode spaces as '%20, and we don't use '+'.
- self.assertIn('Description=With%20Spaces', request.url)
+ assert 'Description=With%20Spaces' in request.url
def test_presign_with_empty_param_value(self):
request = AWSRequest()
@@ -783,7 +770,7 @@ class TestSigV4Presign(BasePresignTest):
request.url = 'https://s3.amazonaws.com/mybucket/mykey?uploads'
self.auth.add_auth(request)
# verify that uploads param is still in URL
- self.assertIn('uploads', request.url)
+ assert 'uploads' in request.url
def test_s3_sigv4_presign(self):
auth = botocore.auth.S3SigV4QueryAuth(
@@ -795,18 +782,17 @@ class TestSigV4Presign(BasePresignTest):
auth.add_auth(request)
query_string = self.get_parsed_query_string(request)
# We use a different payload:
- self.assertEqual(auth.payload(request), 'UNSIGNED-PAYLOAD')
+ assert auth.payload(request) == 'UNSIGNED-PAYLOAD'
# which will result in a different X-Amz-Signature:
- self.assertEqual(
- query_string,
- {'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
- 'X-Amz-Credential': ('access_key/20140101/myregion/'
- 'myservice/aws4_request'),
- 'X-Amz-Date': '20140101T000000Z',
- 'X-Amz-Expires': '60',
- 'X-Amz-Signature': ('ac1b8b9e47e8685c5c963d75e35e8741d55251'
- 'cd955239cc1efad4dc7201db66'),
- 'X-Amz-SignedHeaders': 'host'})
+ assert query_string == {
+ 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256',
+ 'X-Amz-Credential': ('access_key/20140101/myregion/'
+ 'myservice/aws4_request'),
+ 'X-Amz-Date': '20140101T000000Z',
+ 'X-Amz-Expires': '60',
+ 'X-Amz-Signature': ('ac1b8b9e47e8685c5c963d75e35e8741d55251'
+ 'cd955239cc1efad4dc7201db66'),
+ 'X-Amz-SignedHeaders': 'host'}
def test_presign_with_security_token(self):
self.credentials.token = 'security-token'
@@ -817,8 +803,7 @@ class TestSigV4Presign(BasePresignTest):
request.url = 'https://ec2.us-east-1.amazonaws.com/'
auth.add_auth(request)
query_string = self.get_parsed_query_string(request)
- self.assertEqual(
- query_string['X-Amz-Security-Token'], 'security-token')
+ assert query_string['X-Amz-Security-Token'] == 'security-token'
def test_presign_where_body_is_json_bytes(self):
request = AWSRequest()
@@ -839,7 +824,7 @@ class TestSigV4Presign(BasePresignTest):
'X-Amz-SignedHeaders': 'host',
'Param': 'value'
}
- self.assertEqual(query_string, expected_query_string)
+ assert query_string == expected_query_string
def test_presign_where_body_is_json_string(self):
request = AWSRequest()
@@ -860,7 +845,7 @@ class TestSigV4Presign(BasePresignTest):
'X-Amz-SignedHeaders': 'host',
'Param': 'value'
}
- self.assertEqual(query_string, expected_query_string)
+ assert query_string == expected_query_string
def test_presign_content_type_form_encoded_not_signed(self):
request = AWSRequest()
@@ -872,7 +857,7 @@ class TestSigV4Presign(BasePresignTest):
self.auth.add_auth(request)
query_string = self.get_parsed_query_string(request)
signed_headers = query_string.get('X-Amz-SignedHeaders')
- self.assertNotIn('content-type', signed_headers)
+ assert 'content-type' not in signed_headers
class BaseS3PresignPostTest(unittest.TestCase):
@@ -924,26 +909,23 @@ class TestS3SigV2Post(BaseS3PresignPostTest):
def test_presign_post(self):
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(
- result_fields['AWSAccessKeyId'], self.credentials.access_key)
+ assert result_fields['AWSAccessKeyId'] == self.credentials.access_key
result_policy = json.loads(base64.b64decode(
result_fields['policy']).decode('utf-8'))
- self.assertEqual(result_policy['expiration'],
- '2007-12-01T12:00:00.000Z')
- self.assertEqual(
- result_policy['conditions'],
- [{"acl": "public-read"},
+ assert result_policy['expiration'] == '2007-12-01T12:00:00.000Z'
+ assert result_policy['conditions'] == [
+ {"acl": "public-read"},
{"bucket": "mybucket"},
- ["starts-with", "$key", "mykey"]])
- self.assertIn('signature', result_fields)
+ ["starts-with", "$key", "mykey"]]
+ assert 'signature' in result_fields
def test_presign_post_with_security_token(self):
self.credentials.token = 'my-token'
self.auth = botocore.auth.HmacV1PostAuth(self.credentials)
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(result_fields['x-amz-security-token'], 'my-token')
+ assert result_fields['x-amz-security-token'] == 'my-token'
def test_empty_fields_and_policy(self):
self.request = AWSRequest()
@@ -952,12 +934,11 @@ class TestS3SigV2Post(BaseS3PresignPostTest):
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(
- result_fields['AWSAccessKeyId'], self.credentials.access_key)
+ assert result_fields['AWSAccessKeyId'] == self.credentials.access_key
result_policy = json.loads(base64.b64decode(
result_fields['policy']).decode('utf-8'))
- self.assertEqual(result_policy['conditions'], [])
- self.assertIn('signature', result_fields)
+ assert result_policy['conditions'] == []
+ assert 'signature' in result_fields
class TestS3SigV4Post(BaseS3PresignPostTest):
@@ -979,27 +960,22 @@ class TestS3SigV4Post(BaseS3PresignPostTest):
def test_presign_post(self):
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(result_fields['x-amz-algorithm'], 'AWS4-HMAC-SHA256')
- self.assertEqual(
- result_fields['x-amz-credential'],
- 'access_key/20140101/myregion/myservice/aws4_request')
- self.assertEqual(
- result_fields['x-amz-date'],
- '20140101T000000Z')
+ assert result_fields['x-amz-algorithm'] == 'AWS4-HMAC-SHA256'
+ expected_credential = 'access_key/20140101/myregion/myservice/aws4_request'
+ assert result_fields['x-amz-credential'] == expected_credential
+ assert result_fields['x-amz-date'] == '20140101T000000Z'
result_policy = json.loads(base64.b64decode(
result_fields['policy']).decode('utf-8'))
- self.assertEqual(result_policy['expiration'],
- '2007-12-01T12:00:00.000Z')
- self.assertEqual(
- result_policy['conditions'],
- [{"acl": "public-read"}, {"bucket": "mybucket"},
+ assert result_policy['expiration'] == '2007-12-01T12:00:00.000Z'
+ assert result_policy['conditions'] == [
+ {"acl": "public-read"}, {"bucket": "mybucket"},
["starts-with", "$key", "mykey"],
{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-credential":
"access_key/20140101/myregion/myservice/aws4_request"},
- {"x-amz-date": "20140101T000000Z"}])
- self.assertIn('x-amz-signature', result_fields)
+ {"x-amz-date": "20140101T000000Z"}]
+ assert 'x-amz-signature' in result_fields
def test_presign_post_with_security_token(self):
self.credentials.token = 'my-token'
@@ -1007,7 +983,7 @@ class TestS3SigV4Post(BaseS3PresignPostTest):
self.credentials, self.service_name, self.region_name)
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(result_fields['x-amz-security-token'], 'my-token')
+ assert result_fields['x-amz-security-token'] == 'my-token'
def test_empty_fields_and_policy(self):
self.request = AWSRequest()
@@ -1016,20 +992,16 @@ class TestS3SigV4Post(BaseS3PresignPostTest):
self.auth.add_auth(self.request)
result_fields = self.request.context['s3-presign-post-fields']
- self.assertEqual(result_fields['x-amz-algorithm'], 'AWS4-HMAC-SHA256')
- self.assertEqual(
- result_fields['x-amz-credential'],
- 'access_key/20140101/myregion/myservice/aws4_request')
- self.assertEqual(
- result_fields['x-amz-date'],
- '20140101T000000Z')
+ assert result_fields['x-amz-algorithm'] == 'AWS4-HMAC-SHA256'
+ expected_credential = 'access_key/20140101/myregion/myservice/aws4_request'
+ assert result_fields['x-amz-credential'] == expected_credential
+ assert result_fields['x-amz-date'] == '20140101T000000Z'
result_policy = json.loads(base64.b64decode(
result_fields['policy']).decode('utf-8'))
- self.assertEqual(
- result_policy['conditions'],
- [{"x-amz-algorithm": "AWS4-HMAC-SHA256"},
+ assert result_policy['conditions'] == [
+ {"x-amz-algorithm": "AWS4-HMAC-SHA256"},
{"x-amz-credential":
"access_key/20140101/myregion/myservice/aws4_request"},
- {"x-amz-date": "20140101T000000Z"}])
- self.assertIn('x-amz-signature', result_fields)
+ {"x-amz-date": "20140101T000000Z"}]
+ assert 'x-amz-signature' in result_fields
diff --git a/tests/unit/docs/__init__.py b/tests/unit/docs/__init__.py
index 16625c1c..c975f411 100644
--- a/tests/unit/docs/__init__.py
+++ b/tests/unit/docs/__init__.py
@@ -200,20 +200,20 @@ class BaseDocsTest(unittest.TestCase):
def assert_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
- self.assertIn(line, contents)
+ assert line in contents
def assert_contains_lines_in_order(self, lines):
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
- self.assertIn(line, contents)
+ assert line in contents
beginning = contents.find(line)
contents = contents[(beginning + len(line)):]
def assert_not_contains_line(self, line):
contents = self.doc_structure.flush_structure().decode('utf-8')
- self.assertNotIn(line, contents)
+ assert line not in contents
def assert_not_contains_lines(self, lines):
contents = self.doc_structure.flush_structure().decode('utf-8')
for line in lines:
- self.assertNotIn(line, contents)
+ assert line not in contents
diff --git a/tests/unit/docs/test_docs.py b/tests/unit/docs/test_docs.py
index 3582f344..90a7ffff 100644
--- a/tests/unit/docs/test_docs.py
+++ b/tests/unit/docs/test_docs.py
@@ -48,12 +48,12 @@ class TestGenerateDocs(BaseDocsTest):
self.docs_root, 'reference', 'services')
reference_service_path = os.path.join(
reference_services_path, 'myservice.rst')
- self.assertTrue(os.path.exists(reference_service_path))
+ assert os.path.exists(reference_service_path)
# Make sure the rst file has some the expected contents.
with open(reference_service_path, 'r') as f:
contents = f.read()
- self.assertIn('AWS MyService', contents)
- self.assertIn('Client', contents)
- self.assertIn('Paginators', contents)
- self.assertIn('Waiters', contents)
+ assert 'AWS MyService' in contents
+ assert 'Client' in contents
+ assert 'Paginators' in contents
+ assert 'Waiters' in contents
diff --git a/tests/unit/docs/test_docstring.py b/tests/unit/docs/test_docstring.py
index 01adb4af..d8306c98 100644
--- a/tests/unit/docs/test_docstring.py
+++ b/tests/unit/docs/test_docstring.py
@@ -10,6 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from tests import unittest, mock
from botocore.docs.docstring import LazyLoadedDocstring
from botocore.docs.docstring import ClientMethodDocstring
@@ -28,26 +29,26 @@ class MockedLazyLoadedDocstring(LazyLoadedDocstring):
class TestLazyLoadedDocstring(unittest.TestCase):
def test_raises_not_implemented(self):
- with self.assertRaises(NotImplementedError):
+ with pytest.raises(NotImplementedError):
str(LazyLoadedDocstring())
def test_expandtabs(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo\t'))
- self.assertEqual('foo ', docstring.expandtabs(1))
+ assert 'foo ' == docstring.expandtabs(1)
def test_str(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo'))
- self.assertEqual('foo', str(docstring))
+ assert 'foo' == str(docstring)
def test_repr(self):
docstring = MockedLazyLoadedDocstring()
docstring.mocked_writer_method.side_effect = (
lambda section: section.write('foo'))
- self.assertEqual('foo', repr(docstring))
+ assert 'foo' == repr(docstring)
def test_is_lazy_loaded(self):
docstring = MockedLazyLoadedDocstring()
@@ -73,7 +74,7 @@ class TestClientMethodDocstring(unittest.TestCase):
'.document_model_driven_method') as mock_writer:
docstring = ClientMethodDocstring()
str(docstring)
- self.assertTrue(mock_writer.called)
+ assert mock_writer.called
class TestWaiterDocstring(unittest.TestCase):
@@ -83,7 +84,7 @@ class TestWaiterDocstring(unittest.TestCase):
'.document_wait_method') as mock_writer:
docstring = WaiterDocstring()
str(docstring)
- self.assertTrue(mock_writer.called)
+ assert mock_writer.called
class TestPaginatorDocstring(unittest.TestCase):
@@ -93,4 +94,4 @@ class TestPaginatorDocstring(unittest.TestCase):
'.document_paginate_method') as mock_writer:
docstring = PaginatorDocstring()
str(docstring)
- self.assertTrue(mock_writer.called)
+ assert mock_writer.called
diff --git a/tests/unit/docs/test_example.py b/tests/unit/docs/test_example.py
index 677fd769..8258239b 100644
--- a/tests/unit/docs/test_example.py
+++ b/tests/unit/docs/test_example.py
@@ -103,14 +103,12 @@ class TestTraverseAndDocumentShape(BaseExampleDocumenterTest):
)
structure_section = self.doc_structure.get_section('structure-value')
print(self.event_emitter.emit.call_args_list[0][1]['section'].name)
- self.assertEqual(
- self.event_emitter.emit.call_args_list,
- [mock.call('docs.response-example.myservice.SampleOperation.Foo',
+ assert self.event_emitter.emit.call_args_list == [
+ mock.call('docs.response-example.myservice.SampleOperation.Foo',
section=structure_section.get_section(
'Foo').get_section('member-value')),
mock.call(('docs.response-example.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
- )
def test_events_emitted_request_example(self):
self.request_example.traverse_and_document_shape(
@@ -118,14 +116,12 @@ class TestTraverseAndDocumentShape(BaseExampleDocumenterTest):
shape=self.operation_model.input_shape, history=[]
)
structure_section = self.doc_structure.get_section('structure-value')
- self.assertEqual(
- self.event_emitter.emit.call_args_list,
- [mock.call('docs.request-example.myservice.SampleOperation.Foo',
+ assert self.event_emitter.emit.call_args_list == [
+ mock.call('docs.request-example.myservice.SampleOperation.Foo',
section=structure_section.get_section(
'Foo').get_section('member-value')),
mock.call(('docs.request-example.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
- )
class TestDocumentEnumValue(BaseExampleDocumenterTest):
diff --git a/tests/unit/docs/test_method.py b/tests/unit/docs/test_method.py
index 9f9077bb..a720e01f 100644
--- a/tests/unit/docs/test_method.py
+++ b/tests/unit/docs/test_method.py
@@ -32,10 +32,9 @@ class TestGetInstanceMethods(unittest.TestCase):
def test_get_instance_methods(self):
instance = self.MySampleClass()
instance_methods = get_instance_public_methods(instance)
- self.assertEqual(len(instance_methods), 1)
- self.assertIn('public_method', instance_methods)
- self.assertEqual(
- instance.public_method, instance_methods['public_method'])
+ assert len(instance_methods) == 1
+ assert 'public_method' in instance_methods
+ assert instance.public_method == instance_methods['public_method']
class TestDocumentModelDrivenSignature(BaseDocsTest):
diff --git a/tests/unit/docs/test_params.py b/tests/unit/docs/test_params.py
index 8bb8d429..b221f5dd 100644
--- a/tests/unit/docs/test_params.py
+++ b/tests/unit/docs/test_params.py
@@ -70,26 +70,22 @@ class TestTraverseAndDocumentShape(BaseParamsDocumenterTest):
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
- self.assertEqual(
- self.event_emitter.emit.call_args_list,
- [mock.call('docs.response-params.myservice.SampleOperation.Foo',
+ assert self.event_emitter.emit.call_args_list == [
+ mock.call('docs.response-params.myservice.SampleOperation.Foo',
section=self.doc_structure.get_section('Foo')),
mock.call(('docs.response-params.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
- )
def test_events_emitted_request_params(self):
self.request_params.traverse_and_document_shape(
section=self.doc_structure,
shape=self.operation_model.input_shape, history=[]
)
- self.assertEqual(
- self.event_emitter.emit.call_args_list,
- [mock.call('docs.request-params.myservice.SampleOperation.Foo',
+ assert self.event_emitter.emit.call_args_list == [
+ mock.call('docs.request-params.myservice.SampleOperation.Foo',
section=self.doc_structure.get_section('Foo')),
mock.call(('docs.request-params.myservice.SampleOperation'
'.complete-section'), section=self.doc_structure)]
- )
class TestDocumentMultipleDefaultValues(BaseParamsDocumenterTest):
diff --git a/tests/unit/docs/test_service.py b/tests/unit/docs/test_service.py
index f768626c..cc114a88 100644
--- a/tests/unit/docs/test_service.py
+++ b/tests/unit/docs/test_service.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import os
+import pytest
from tests import mock
from tests.unit.docs import BaseDocsTest
@@ -70,14 +71,14 @@ class TestServiceDocumenter(BaseDocsTest):
' .. py:method:: wait(**kwargs)'
]
for line in lines:
- self.assertIn(line, contents)
+ assert line in contents
def test_document_service_no_paginator(self):
os.remove(self.paginator_model_file)
contents = self.service_documenter.document_service().decode('utf-8')
- self.assertNotIn('Paginators', contents)
+ assert 'Paginators' not in contents
def test_document_service_no_waiter(self):
os.remove(self.waiter_model_file)
contents = self.service_documenter.document_service().decode('utf-8')
- self.assertNotIn('Waiters', contents)
+ assert 'Waiters' not in contents
diff --git a/tests/unit/docs/test_utils.py b/tests/unit/docs/test_utils.py
index b3dae2b7..5117a9be 100644
--- a/tests/unit/docs/test_utils.py
+++ b/tests/unit/docs/test_utils.py
@@ -23,66 +23,66 @@ from botocore.docs.utils import escape_controls
class TestPythonTypeName(unittest.TestCase):
def test_structure(self):
- self.assertEqual('dict', py_type_name('structure'))
+ assert 'dict' == py_type_name('structure')
def test_list(self):
- self.assertEqual('list', py_type_name('list'))
+ assert 'list' == py_type_name('list')
def test_map(self):
- self.assertEqual('dict', py_type_name('map'))
+ assert 'dict' == py_type_name('map')
def test_string(self):
- self.assertEqual('string', py_type_name('string'))
+ assert 'string' == py_type_name('string')
def test_character(self):
- self.assertEqual('string', py_type_name('character'))
+ assert 'string' == py_type_name('character')
def test_blob(self):
- self.assertEqual('bytes', py_type_name('blob'))
+ assert 'bytes' == py_type_name('blob')
def test_timestamp(self):
- self.assertEqual('datetime', py_type_name('timestamp'))
+ assert 'datetime' == py_type_name('timestamp')
def test_integer(self):
- self.assertEqual('integer', py_type_name('integer'))
+ assert 'integer' == py_type_name('integer')
def test_long(self):
- self.assertEqual('integer', py_type_name('long'))
+ assert 'integer' == py_type_name('long')
def test_float(self):
- self.assertEqual('float', py_type_name('float'))
+ assert 'float' == py_type_name('float')
def test_double(self):
- self.assertEqual('float', py_type_name('double'))
+ assert 'float' == py_type_name('double')
class TestPythonDefault(unittest.TestCase):
def test_structure(self):
- self.assertEqual('{...}', py_default('structure'))
+ assert '{...}' == py_default('structure')
def test_list(self):
- self.assertEqual('[...]', py_default('list'))
+ assert '[...]' == py_default('list')
def test_map(self):
- self.assertEqual('{...}', py_default('map'))
+ assert '{...}' == py_default('map')
def test_string(self):
- self.assertEqual('\'string\'', py_default('string'))
+ assert '\'string\'' == py_default('string')
def test_blob(self):
- self.assertEqual('b\'bytes\'', py_default('blob'))
+ assert 'b\'bytes\'' == py_default('blob')
def test_timestamp(self):
- self.assertEqual('datetime(2015, 1, 1)', py_default('timestamp'))
+ assert 'datetime(2015, 1, 1)' == py_default('timestamp')
def test_integer(self):
- self.assertEqual('123', py_default('integer'))
+ assert '123' == py_default('integer')
def test_long(self):
- self.assertEqual('123', py_default('long'))
+ assert '123' == py_default('long')
def test_double(self):
- self.assertEqual('123.0', py_default('double'))
+ assert '123.0' == py_default('double')
class TestGetOfficialServiceName(BaseDocsTest):
@@ -93,24 +93,20 @@ class TestGetOfficialServiceName(BaseDocsTest):
}
def test_no_short_name(self):
- self.assertEqual('Official Name',
- get_official_service_name(self.service_model))
+ assert 'Official Name' == get_official_service_name(self.service_model)
def test_aws_short_name(self):
self.service_model.metadata['serviceAbbreviation'] = 'AWS Foo'
- self.assertEqual('Official Name (Foo)',
- get_official_service_name(self.service_model))
+ assert 'Official Name (Foo)' == get_official_service_name(self.service_model)
def test_amazon_short_name(self):
self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
- self.assertEqual('Official Name (Foo)',
- get_official_service_name(self.service_model))
+ assert 'Official Name (Foo)' == get_official_service_name(self.service_model)
def test_short_name_in_official_name(self):
self.service_model.metadata['serviceFullName'] = 'The Foo Service'
self.service_model.metadata['serviceAbbreviation'] = 'Amazon Foo'
- self.assertEqual('The Foo Service',
- get_official_service_name(self.service_model))
+ assert 'The Foo Service' == get_official_service_name(self.service_model)
class TestAutopopulatedParam(BaseDocsTest):
@@ -223,5 +219,5 @@ class TestAppendParamDocumentation(BaseDocsTest):
class TestEscapeControls(unittest.TestCase):
def test_escapes_controls(self):
escaped = escape_controls('\na\rb\tc\fd\be')
- self.assertEqual(escaped, '\\na\\rb\\tc\\fd\\be')
+ assert escaped == '\\na\\rb\\tc\\fd\\be'
diff --git a/tests/unit/response_parsing/test_response_parsing.py b/tests/unit/response_parsing/test_response_parsing.py
index 421326b6..cecd4521 100644
--- a/tests/unit/response_parsing/test_response_parsing.py
+++ b/tests/unit/response_parsing/test_response_parsing.py
@@ -17,6 +17,8 @@ import json
import pprint
import logging
import difflib
+import pytest
+import itertools
from tests import create_session
import botocore.session
@@ -35,54 +37,7 @@ SPECIAL_CASES = [
]
-def _test_parsed_response(xmlfile, response_body, operation_model, expected):
- response = {
- 'body': response_body,
- 'status_code': 200,
- 'headers': {}
- }
- for case in SPECIAL_CASES:
- if case in xmlfile:
- print("SKIP: %s" % xmlfile)
- return
- if 'errors' in xmlfile:
- response['status_code'] = 400
- # Handle the special cased __headers__ key if it exists.
- if b'__headers__' in response_body:
- loaded = json.loads(response_body.decode('utf-8'))
- response['headers'] = loaded.pop('__headers__')
- response['body'] = json.dumps(loaded).encode('utf-8')
- protocol = operation_model.service_model.protocol
- parser_cls = parsers.PROTOCOL_PARSERS[protocol]
- parser = parser_cls(timestamp_parser=lambda x: x)
- parsed = parser.parse(response, operation_model.output_shape)
- parsed = _convert_bytes_to_str(parsed)
- expected['ResponseMetadata']['HTTPStatusCode'] = response['status_code']
- expected['ResponseMetadata']['HTTPHeaders'] = response['headers']
-
- d2 = parsed
- d1 = expected
-
- if d1 != d2:
- log.debug('-' * 40)
- log.debug("XML FILE:\n" + xmlfile)
- log.debug('-' * 40)
- log.debug("ACTUAL:\n" + pprint.pformat(parsed))
- log.debug('-' * 40)
- log.debug("EXPECTED:\n" + pprint.pformat(expected))
- if not d1 == d2:
- # Borrowed from assertDictEqual, though this doesn't
- # handle the case when unicode literals are used in one
- # dict but not in the other (and we want to consider them
- # as being equal).
- print(d1)
- print()
- print(d2)
- pretty_d1 = pprint.pformat(d1, width=1).splitlines()
- pretty_d2 = pprint.pformat(d2, width=1).splitlines()
- diff = ('\n' + '\n'.join(difflib.ndiff(pretty_d1, pretty_d2)))
- raise AssertionError("Dicts are not equal:\n%s" % diff)
def _convert_bytes_to_str(parsed):
@@ -102,7 +57,7 @@ def _convert_bytes_to_str(parsed):
return parsed
-def test_xml_parsing():
+def _test_xml_parsing():
for dp in ['responses', 'errors']:
data_path = os.path.join(os.path.dirname(__file__), 'xml')
data_path = os.path.join(data_path, dp)
@@ -119,7 +74,7 @@ def test_xml_parsing():
expected = _get_expected_parsed_result(xmlfile)
operation_model = _get_operation_model(service_model, xmlfile)
raw_response_body = _get_raw_response_body(xmlfile)
- _test_parsed_response(xmlfile, raw_response_body,
+ yield (xmlfile, raw_response_body,
operation_model, expected)
@@ -153,7 +108,7 @@ def _get_expected_parsed_result(filename):
return json.load(f)
-def test_json_errors_parsing():
+def _test_json_errors_parsing():
# The outputs/ directory has sample output responses
# For each file in outputs/ there's a corresponding file
# in expected/ that has the expected parsed response.
@@ -179,8 +134,7 @@ def test_json_errors_parsing():
operation_model = service_model.operation_model(op_name)
with open(raw_response_file, 'rb') as f:
raw_response_body = f.read()
- _test_parsed_response(raw_response_file,
- raw_response_body, operation_model, expected)
+ yield (raw_response_file, raw_response_body, operation_model, expected)
def _uhg_test_json_parsing():
@@ -202,11 +156,61 @@ def _uhg_test_json_parsing():
operation_model = _get_operation_model(service_model, jsonfile)
with open(jsonfile, 'rb') as f:
raw_response_body = f.read()
- yield _test_parsed_response, jsonfile, \
- raw_response_body, operation_model, expected
+ yield (jsonfile, raw_response_body, operation_model, expected)
# TODO: handle the __headers crap.
+@pytest.mark.parametrize("xmlfile,response_body,operation_model,expected",
+    itertools.chain(_test_json_errors_parsing(), _test_xml_parsing()))
+def test_parsed_response(xmlfile, response_body, operation_model, expected):
+ response = {
+ 'body': response_body,
+ 'status_code': 200,
+ 'headers': {}
+ }
+ for case in SPECIAL_CASES:
+ if case in xmlfile:
+ print("SKIP: %s" % xmlfile)
+ return
+ if 'errors' in xmlfile:
+ response['status_code'] = 400
+ # Handle the special cased __headers__ key if it exists.
+ if b'__headers__' in response_body:
+ loaded = json.loads(response_body.decode('utf-8'))
+ response['headers'] = loaded.pop('__headers__')
+ response['body'] = json.dumps(loaded).encode('utf-8')
+
+ protocol = operation_model.service_model.protocol
+ parser_cls = parsers.PROTOCOL_PARSERS[protocol]
+ parser = parser_cls(timestamp_parser=lambda x: x)
+ parsed = parser.parse(response, operation_model.output_shape)
+ parsed = _convert_bytes_to_str(parsed)
+ expected['ResponseMetadata']['HTTPStatusCode'] = response['status_code']
+ expected['ResponseMetadata']['HTTPHeaders'] = response['headers']
+
+ d2 = parsed
+ d1 = expected
+
+ if d1 != d2:
+ log.debug('-' * 40)
+ log.debug("XML FILE:\n" + xmlfile)
+ log.debug('-' * 40)
+ log.debug("ACTUAL:\n" + pprint.pformat(parsed))
+ log.debug('-' * 40)
+ log.debug("EXPECTED:\n" + pprint.pformat(expected))
+ if not d1 == d2:
+ # Borrowed from assertDictEqual, though this doesn't
+ # handle the case when unicode literals are used in one
+ # dict but not in the other (and we want to consider them
+ # as being equal).
+ print(d1)
+ print()
+ print(d2)
+ pretty_d1 = pprint.pformat(d1, width=1).splitlines()
+ pretty_d2 = pprint.pformat(d2, width=1).splitlines()
+ diff = ('\n' + '\n'.join(difflib.ndiff(pretty_d1, pretty_d2)))
+ raise AssertionError("Dicts are not equal:\n%s" % diff)
+
#class TestHeaderParsing(unittest.TestCase):
#
# maxDiff = None
diff --git a/tests/unit/retries/test_adaptive.py b/tests/unit/retries/test_adaptive.py
index 5c495187..5ec18eb4 100644
--- a/tests/unit/retries/test_adaptive.py
+++ b/tests/unit/retries/test_adaptive.py
@@ -24,11 +24,9 @@ class TestCanCreateRetryHandler(unittest.TestCase):
def test_can_register_retry_handler(self):
client = mock.Mock()
limiter = adaptive.register_retry_handler(client)
- self.assertEqual(
- client.meta.events.register.call_args_list,
- [mock.call('before-send', limiter.on_sending_request),
- mock.call('needs-retry', limiter.on_receiving_response)]
- )
+ assert client.meta.events.register.call_args_list == [
+ mock.call('before-send', limiter.on_sending_request),
+ mock.call('needs-retry', limiter.on_receiving_response)]
class TestClientRateLimiter(unittest.TestCase):
diff --git a/tests/unit/retries/test_bucket.py b/tests/unit/retries/test_bucket.py
index f056c6c6..b75fc0ee 100644
--- a/tests/unit/retries/test_bucket.py
+++ b/tests/unit/retries/test_bucket.py
@@ -1,4 +1,5 @@
from tests import unittest
+import pytest
from botocore.retries import bucket
from botocore.exceptions import CapacityNotAvailableError
@@ -37,7 +38,7 @@ class TestTokenBucket(unittest.TestCase):
])
token_bucket = self.create_token_bucket(max_rate=10)
for _ in range(5):
- self.assertTrue(token_bucket.acquire(1, block=False))
+ assert token_bucket.acquire(1, block=False)
def test_can_change_max_capacity_lower(self):
# Requests at 1 TPS.
@@ -45,21 +46,21 @@ class TestTokenBucket(unittest.TestCase):
token_bucket = self.create_token_bucket(max_rate=10)
# Request the first 5 tokens with max_rate=10
for _ in range(5):
- self.assertTrue(token_bucket.acquire(1, block=False))
+ assert token_bucket.acquire(1, block=False)
# Now scale the max_rate down to 1 on the 5th second.
self.timestamp_sequences.append(5)
token_bucket.max_rate = 1
# And then from seconds 6-10 we request at one per second.
self.timestamp_sequences.extend([6, 7, 8, 9, 10])
for _ in range(5):
- self.assertTrue(token_bucket.acquire(1, block=False))
+ assert token_bucket.acquire(1, block=False)
def test_max_capacity_is_at_least_one(self):
token_bucket = self.create_token_bucket()
self.timestamp_sequences.append(1)
token_bucket.max_rate = 0.5
- self.assertEqual(token_bucket.max_rate, 0.5)
- self.assertEqual(token_bucket.max_capacity, 1)
+ assert token_bucket.max_rate == 0.5
+ assert token_bucket.max_capacity == 1
def test_acquire_fails_on_non_block_mode_returns_false(self):
self.timestamp_sequences.extend([
@@ -69,7 +70,7 @@ class TestTokenBucket(unittest.TestCase):
1
])
token_bucket = self.create_token_bucket(max_rate=10)
- with self.assertRaises(CapacityNotAvailableError):
+ with pytest.raises(CapacityNotAvailableError):
token_bucket.acquire(100, block=False)
def test_can_retrieve_at_max_send_rate(self):
@@ -79,7 +80,7 @@ class TestTokenBucket(unittest.TestCase):
])
token_bucket = self.create_token_bucket(max_rate=10)
for _ in range(20):
- self.assertTrue(token_bucket.acquire(1, block=False))
+ assert token_bucket.acquire(1, block=False)
def test_acquiring_blocks_when_capacity_reached(self):
# This is 1 token every 0.1 seconds.
@@ -98,15 +99,15 @@ class TestTokenBucket(unittest.TestCase):
# test run time), we have to go slightly over 0.3 seconds here.
0.300001,
])
- self.assertTrue(token_bucket.acquire(1, block=False))
- self.assertEqual(token_bucket.available_capacity, 0)
- self.assertTrue(token_bucket.acquire(1, block=True))
- self.assertEqual(token_bucket.available_capacity, 0)
- self.assertTrue(token_bucket.acquire(1, block=False))
+ assert token_bucket.acquire(1, block=False)
+ assert token_bucket.available_capacity == 0
+ assert token_bucket.acquire(1, block=True)
+ assert token_bucket.available_capacity == 0
+ assert token_bucket.acquire(1, block=False)
def test_rate_cant_go_below_min(self):
token_bucket = self.create_token_bucket(max_rate=1, min_rate=0.2)
self.timestamp_sequences.append(1)
token_bucket.max_rate = 0.1
- self.assertEqual(token_bucket.max_rate, 0.2)
- self.assertEqual(token_bucket.max_capacity, 1)
+ assert token_bucket.max_rate == 0.2
+ assert token_bucket.max_capacity == 1
diff --git a/tests/unit/retries/test_quota.py b/tests/unit/retries/test_quota.py
index 9060b94d..dca4646b 100644
--- a/tests/unit/retries/test_quota.py
+++ b/tests/unit/retries/test_quota.py
@@ -9,27 +9,27 @@ class TestRetryQuota(unittest.TestCase):
self.retry_quota = quota.RetryQuota(50)
def test_can_acquire_amount(self):
- self.assertTrue(self.retry_quota.acquire(5))
- self.assertEqual(self.retry_quota.available_capacity, 45)
+ assert self.retry_quota.acquire(5)
+ assert self.retry_quota.available_capacity == 45
def test_can_release_amount(self):
- self.assertTrue(self.retry_quota.acquire(5))
- self.assertEqual(self.retry_quota.available_capacity, 45)
+ assert self.retry_quota.acquire(5)
+ assert self.retry_quota.available_capacity == 45
self.retry_quota.release(5)
- self.assertEqual(self.retry_quota.available_capacity, 50)
+ assert self.retry_quota.available_capacity == 50
def test_cant_exceed_max_capacity(self):
- self.assertTrue(self.retry_quota.acquire(5))
- self.assertEqual(self.retry_quota.available_capacity, 45)
+ assert self.retry_quota.acquire(5)
+ assert self.retry_quota.available_capacity == 45
self.retry_quota.release(10)
- self.assertEqual(self.retry_quota.available_capacity, 50)
+ assert self.retry_quota.available_capacity == 50
def test_noop_if_at_max_capacity(self):
self.retry_quota.release(10)
- self.assertEqual(self.retry_quota.available_capacity, 50)
+ assert self.retry_quota.available_capacity == 50
def test_cant_go_below_zero(self):
- self.assertTrue(self.retry_quota.acquire(49))
- self.assertEqual(self.retry_quota.available_capacity, 1)
- self.assertFalse(self.retry_quota.acquire(10))
- self.assertEqual(self.retry_quota.available_capacity, 1)
+ assert self.retry_quota.acquire(49)
+ assert self.retry_quota.available_capacity == 1
+ assert not self.retry_quota.acquire(10)
+ assert self.retry_quota.available_capacity == 1
diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py
index f8a14a9f..9879578d 100644
--- a/tests/unit/retries/test_standard.py
+++ b/tests/unit/retries/test_standard.py
@@ -279,8 +279,7 @@ class TestRetryHandler(unittest.TestCase):
self.retry_quota.acquire_retry_quota.return_value = True
self.retry_policy.compute_retry_delay.return_value = 1
- self.assertEqual(
- self.retry_handler.needs_retry(fake_kwargs='foo'), 1)
+ assert self.retry_handler.needs_retry(fake_kwargs='foo') == 1
self.retry_event_adapter.create_retry_context.assert_called_with(
fake_kwargs='foo')
self.retry_policy.should_retry.assert_called_with(
@@ -295,9 +294,9 @@ class TestRetryHandler(unittest.TestCase):
mock.sentinel.retry_context
self.retry_policy.should_retry.return_value = False
- self.assertIsNone(self.retry_handler.needs_retry(fake_kwargs='foo'))
+ assert self.retry_handler.needs_retry(fake_kwargs='foo') is None
# Shouldn't consult quota if we don't have a retryable condition.
- self.assertFalse(self.retry_quota.acquire_retry_quota.called)
+ assert not self.retry_quota.acquire_retry_quota.called
def test_needs_retry_but_not_enough_quota(self):
self.retry_event_adapter.create_retry_context.return_value = \
@@ -305,13 +304,13 @@ class TestRetryHandler(unittest.TestCase):
self.retry_policy.should_retry.return_value = True
self.retry_quota.acquire_retry_quota.return_value = False
- self.assertIsNone(self.retry_handler.needs_retry(fake_kwargs='foo'))
+ assert self.retry_handler.needs_retry(fake_kwargs='foo') is None
def test_retry_handler_adds_retry_metadata_to_response(self):
self.retry_event_adapter.create_retry_context.return_value = \
mock.sentinel.retry_context
self.retry_policy.should_retry.return_value = False
- self.assertIsNone(self.retry_handler.needs_retry(fake_kwargs='foo'))
+ assert self.retry_handler.needs_retry(fake_kwargs='foo') is None
adapter = self.retry_event_adapter
adapter.adapt_retry_response_from_context.assert_called_with(
mock.sentinel.retry_context)
@@ -336,13 +335,12 @@ class TestRetryEventAdapter(unittest.TestCase):
operation=mock.sentinel.operation_model,
)
- self.assertEqual(context.attempt_number, 1)
- self.assertEqual(context.operation_model,
- mock.sentinel.operation_model)
- self.assertEqual(context.parsed_response, self.success_response)
- self.assertEqual(context.http_response, self.http_success)
- self.assertEqual(context.caught_exception, None)
- self.assertEqual(context.request_context, {'foo': 'bar'})
+ assert context.attempt_number == 1
+ assert context.operation_model == mock.sentinel.operation_model
+ assert context.parsed_response == self.success_response
+ assert context.http_response == self.http_success
+ assert context.caught_exception is None
+ assert context.request_context == {'foo': 'bar'}
def test_create_context_from_service_error(self):
context = standard.RetryEventAdapter().create_retry_context(
@@ -355,8 +353,8 @@ class TestRetryEventAdapter(unittest.TestCase):
# We already tested the other attributes in
# test_create_context_from_success_response so we're only checking
# the attributes relevant to this test.
- self.assertEqual(context.parsed_response, self.failed_response)
- self.assertEqual(context.http_response, self.http_failed)
+ assert context.parsed_response == self.failed_response
+ assert context.http_response == self.http_failed
def test_create_context_from_exception(self):
context = standard.RetryEventAdapter().create_retry_context(
@@ -366,9 +364,9 @@ class TestRetryEventAdapter(unittest.TestCase):
request_dict={'context': {'foo': 'bar'}},
operation=mock.sentinel.operation_model,
)
- self.assertEqual(context.parsed_response, None)
- self.assertEqual(context.http_response, None)
- self.assertEqual(context.caught_exception, self.caught_exception)
+ assert context.parsed_response is None
+ assert context.http_response is None
+ assert context.caught_exception == self.caught_exception
def test_can_inject_metadata_back_to_context(self):
adapter = standard.RetryEventAdapter()
@@ -381,10 +379,7 @@ class TestRetryEventAdapter(unittest.TestCase):
)
context.add_retry_metadata(MaxAttemptsReached=True)
adapter.adapt_retry_response_from_context(context)
- self.assertEqual(
- self.failed_response['ResponseMetadata']['MaxAttemptsReached'],
- True
- )
+ assert self.failed_response['ResponseMetadata']['MaxAttemptsReached'] is True
class TestRetryPolicy(unittest.TestCase):
@@ -397,14 +392,13 @@ class TestRetryPolicy(unittest.TestCase):
def test_delegates_to_retry_checker(self):
self.retry_checker.is_retryable.return_value = True
- self.assertTrue(self.retry_policy.should_retry(mock.sentinel.context))
+ assert self.retry_policy.should_retry(mock.sentinel.context)
self.retry_checker.is_retryable.assert_called_with(
mock.sentinel.context)
def test_delegates_to_retry_backoff(self):
self.retry_backoff.delay_amount.return_value = 1
- self.assertEqual(
- self.retry_policy.compute_retry_delay(mock.sentinel.context), 1)
+ assert self.retry_policy.compute_retry_delay(mock.sentinel.context) == 1
self.retry_backoff.delay_amount.assert_called_with(
mock.sentinel.context)
@@ -421,8 +415,7 @@ class TestExponentialBackoff(unittest.TestCase):
for i in range(1, 10)
]
# Note that we're capped at 20 which is our max backoff.
- self.assertEqual(backoffs,
- [1, 2, 4, 8, 16, 20, 20, 20, 20])
+ assert backoffs == [1, 2, 4, 8, 16, 20, 20, 20, 20]
def test_exponential_backoff_with_jitter(self):
backoff = standard.ExponentialBackoff()
@@ -433,7 +426,7 @@ class TestExponentialBackoff(unittest.TestCase):
# For attempt number 3, we should have a max value of 4 (2 ^ 2),
# so we can assert all the backoff values are within that range.
for x in backoffs:
- self.assertTrue(0 <= x <= 4)
+ assert 0 <= x <= 4
class TestRetryQuotaChecker(unittest.TestCase):
@@ -457,30 +450,24 @@ class TestRetryQuotaChecker(unittest.TestCase):
return context
def test_can_acquire_quota_non_timeout_error(self):
- self.assertTrue(
- self.quota_checker.acquire_retry_quota(self.create_context())
- )
- self.assertEqual(self.request_context['retry_quota_capacity'], 5)
+ assert self.quota_checker.acquire_retry_quota(self.create_context())
+ assert self.request_context['retry_quota_capacity'] == 5
def test_can_acquire_quota_for_timeout_error(self):
- self.assertTrue(
- self.quota_checker.acquire_retry_quota(
+ assert self.quota_checker.acquire_retry_quota(
self.create_context(is_timeout_error=True))
- )
- self.assertEqual(self.request_context['retry_quota_capacity'], 10)
+ assert self.request_context['retry_quota_capacity'] == 10
def test_can_release_quota_based_on_context_value_on_success(self):
context = self.create_context()
# This is where we had to retry the request but eventually
# succeeded.
http_response = self.create_context(status_code=200).http_response
- self.assertTrue(
- self.quota_checker.acquire_retry_quota(context)
- )
- self.assertEqual(self.quota.available_capacity, 495)
+ assert self.quota_checker.acquire_retry_quota(context)
+ assert self.quota.available_capacity == 495
self.quota_checker.release_retry_quota(context.request_context,
http_response=http_response)
- self.assertEqual(self.quota.available_capacity, 500)
+ assert self.quota.available_capacity == 500
def test_dont_release_quota_if_all_retries_failed(self):
context = self.create_context()
@@ -488,51 +475,40 @@ class TestRetryQuotaChecker(unittest.TestCase):
# our retry attempts and still failed. In this case we shouldn't
# give any retry quota back.
http_response = self.create_context(status_code=500).http_response
- self.assertTrue(
- self.quota_checker.acquire_retry_quota(context)
- )
- self.assertEqual(self.quota.available_capacity, 495)
+ assert self.quota_checker.acquire_retry_quota(context)
+ assert self.quota.available_capacity == 495
self.quota_checker.release_retry_quota(context.request_context,
http_response=http_response)
- self.assertEqual(self.quota.available_capacity, 495)
+ assert self.quota.available_capacity == 495
def test_can_release_default_quota_if_not_in_context(self):
context = self.create_context()
- self.assertTrue(
- self.quota_checker.acquire_retry_quota(context)
- )
- self.assertEqual(self.quota.available_capacity, 495)
+ assert self.quota_checker.acquire_retry_quota(context)
+ assert self.quota.available_capacity == 495
# We're going to remove the quota amount from the request context.
# This represents a successful request with no retries.
self.request_context.pop('retry_quota_capacity')
self.quota_checker.release_retry_quota(context.request_context,
context.http_response)
# We expect only 1 unit was released.
- self.assertEqual(self.quota.available_capacity, 496)
+ assert self.quota.available_capacity == 496
def test_acquire_quota_fails(self):
quota_checker = standard.RetryQuotaChecker(
quota.RetryQuota(initial_capacity=5))
# The first one succeeds.
- self.assertTrue(
- quota_checker.acquire_retry_quota(self.create_context())
- )
+ assert quota_checker.acquire_retry_quota(self.create_context())
# But we should fail now because we're out of quota.
self.request_context.pop('retry_quota_capacity')
- self.assertFalse(
- quota_checker.acquire_retry_quota(self.create_context())
- )
- self.assertNotIn('retry_quota_capacity', self.request_context)
+ assert not quota_checker.acquire_retry_quota(self.create_context())
+ assert 'retry_quota_capacity' not in self.request_context
def test_quota_reached_adds_retry_metadata(self):
quota_checker = standard.RetryQuotaChecker(
quota.RetryQuota(initial_capacity=0))
context = self.create_context()
- self.assertFalse(quota_checker.acquire_retry_quota(context))
- self.assertEqual(
- context.get_retry_metadata(),
- {'RetryQuotaReached': True}
- )
+ assert not quota_checker.acquire_retry_quota(context)
+ assert context.get_retry_metadata() == {'RetryQuotaReached': True}
def test_single_failed_request_does_not_give_back_quota(self):
context = self.create_context()
@@ -540,33 +516,32 @@ class TestRetryQuotaChecker(unittest.TestCase):
# First deduct some amount of the retry quota so we're not hitting
# the upper bound.
self.quota.acquire(50)
- self.assertEqual(self.quota.available_capacity, 450)
+ assert self.quota.available_capacity == 450
self.quota_checker.release_retry_quota(context.request_context,
http_response=http_response)
- self.assertEqual(self.quota.available_capacity, 450)
+ assert self.quota.available_capacity == 450
class TestRetryContext(unittest.TestCase):
def test_can_get_error_code(self):
context = arbitrary_retry_context()
context.parsed_response['Error']['Code'] = 'MyErrorCode'
- self.assertEqual(context.get_error_code(), 'MyErrorCode')
+ assert context.get_error_code() == 'MyErrorCode'
def test_no_error_code_if_no_parsed_response(self):
context = arbitrary_retry_context()
context.parsed_response = None
- self.assertIsNone(context.get_error_code())
+ assert context.get_error_code() is None
def test_no_error_code_returns_none(self):
context = arbitrary_retry_context()
context.parsed_response = {}
- self.assertIsNone(context.get_error_code())
+ assert context.get_error_code() is None
def test_can_add_retry_reason(self):
context = arbitrary_retry_context()
context.add_retry_metadata(MaxAttemptsReached=True)
- self.assertEqual(context.get_retry_metadata(),
- {'MaxAttemptsReached': True})
+ assert context.get_retry_metadata() == {'MaxAttemptsReached': True}
def test_handles_non_error_top_level_error_key_get_error_code(self):
response = AWSResponse(
@@ -606,16 +581,12 @@ class TestThrottlingErrorDetector(unittest.TestCase):
def test_can_check_error_from_code(self):
kwargs = self.create_needs_retry_kwargs()
kwargs['response'] = (None, {'Error': {'Code': 'ThrottledException'}})
- self.assertTrue(
- self.throttling_detector.is_throttling_error(**kwargs)
- )
+ assert self.throttling_detector.is_throttling_error(**kwargs)
def test_no_throttling_error(self):
kwargs = self.create_needs_retry_kwargs()
kwargs['response'] = (None, {'Error': {'Code': 'RandomError'}})
- self.assertFalse(
- self.throttling_detector.is_throttling_error(**kwargs)
- )
+ assert not self.throttling_detector.is_throttling_error(**kwargs)
def test_detects_modeled_errors(self):
kwargs = self.create_needs_retry_kwargs()
@@ -623,9 +594,7 @@ class TestThrottlingErrorDetector(unittest.TestCase):
None, {'Error': {'Code': 'ModeledThrottlingError'}}
)
kwargs['operation'] = get_operation_model_with_retries()
- self.assertTrue(
- self.throttling_detector.is_throttling_error(**kwargs)
- )
+ assert self.throttling_detector.is_throttling_error(**kwargs)
class TestModeledRetryErrorDetector(unittest.TestCase):
@@ -634,25 +603,19 @@ class TestModeledRetryErrorDetector(unittest.TestCase):
def test_not_retryable(self):
context = arbitrary_retry_context()
- self.assertIsNone(self.modeled_error.detect_error_type(context))
+ assert self.modeled_error.detect_error_type(context) is None
def test_transient_error(self):
context = arbitrary_retry_context()
context.parsed_response['Error']['Code'] = 'ModeledRetryableError'
context.operation_model = get_operation_model_with_retries()
- self.assertEqual(
- self.modeled_error.detect_error_type(context),
- self.modeled_error.TRANSIENT_ERROR
- )
+ assert self.modeled_error.detect_error_type(context) == self.modeled_error.TRANSIENT_ERROR
def test_throttling_error(self):
context = arbitrary_retry_context()
context.parsed_response['Error']['Code'] = 'ModeledThrottlingError'
context.operation_model = get_operation_model_with_retries()
- self.assertEqual(
- self.modeled_error.detect_error_type(context),
- self.modeled_error.THROTTLING_ERROR
- )
+ assert self.modeled_error.detect_error_type(context) == self.modeled_error.THROTTLING_ERROR
class Yes(standard.BaseRetryableChecker):
@@ -667,25 +630,17 @@ class No(standard.BaseRetryableChecker):
class TestOrRetryChecker(unittest.TestCase):
def test_can_match_any_checker(self):
- self.assertTrue(
- standard.OrRetryChecker(
+ assert standard.OrRetryChecker(
[Yes(), No()]
)
- )
- self.assertTrue(
- standard.OrRetryChecker(
+ assert standard.OrRetryChecker(
[No(), Yes()]
)
- )
- self.assertTrue(
- standard.OrRetryChecker(
+ assert standard.OrRetryChecker(
[Yes(), Yes()]
)
- )
def test_false_if_no_checkers_match(self):
- self.assertTrue(
- standard.OrRetryChecker(
+ assert standard.OrRetryChecker(
[No(), No(), No()]
)
- )
diff --git a/tests/unit/retries/test_throttling.py b/tests/unit/retries/test_throttling.py
index 9c1c69b7..3c87575d 100644
--- a/tests/unit/retries/test_throttling.py
+++ b/tests/unit/retries/test_throttling.py
@@ -1,5 +1,5 @@
from tests import unittest
-
+import pytest
from botocore.retries import throttling
@@ -17,9 +17,7 @@ class TestCubicCalculator(unittest.TestCase):
def test_starting_params(self):
cubic = self.create_cubic_calculator(starting_max_rate=10)
- self.assertAlmostEqual(
- cubic.get_params_snapshot().k, 1.9574338205844317
- )
+ assert pytest.approx(cubic.get_params_snapshot().k) == 1.9574338205844317
def test_success_responses_until_max_hit(self):
# For this test we're interested in the behavior less so than
@@ -31,17 +29,14 @@ class TestCubicCalculator(unittest.TestCase):
start_w_max = params.w_max
# Before we get to t == start_k, our throttle is below our
# max w_max
- assertLessEqual = self.assertLessEqual
- assertLessEqual(cubic.success_received(start_k / 3.0), start_w_max)
- assertLessEqual(cubic.success_received(start_k / 2.0), start_w_max)
- assertLessEqual(cubic.success_received(start_k / 1.1), start_w_max)
+ assert cubic.success_received(start_k / 3.0) <= start_w_max
+ assert cubic.success_received(start_k / 2.0) <= start_w_max
+ assert cubic.success_received(start_k / 1.1) <= start_w_max
# At t == start_k, we should be at w_max.
- self.assertAlmostEqual(cubic.success_received(timestamp=start_k), 10.0)
+ assert pytest.approx(cubic.success_received(timestamp=start_k)) == 10.0
# And once we pass start_k, we'll be above w_max.
- self.assertGreaterEqual(
- cubic.success_received(start_k * 1.1), start_w_max)
- self.assertGreaterEqual(
- cubic.success_received(start_k * 2.0), start_w_max)
+ assert cubic.success_received(start_k * 1.1) >= start_w_max
+ assert cubic.success_received(start_k * 2.0) >= start_w_max
def test_error_response_decreases_rate_by_beta(self):
# This is the default value here so we're just being explicit.
@@ -51,21 +46,18 @@ class TestCubicCalculator(unittest.TestCase):
rate_when_throttled = 8
new_rate = cubic.error_received(current_rate=rate_when_throttled,
timestamp=1)
- self.assertAlmostEqual(new_rate, rate_when_throttled * 0.7)
+ assert pytest.approx(new_rate) == rate_when_throttled * 0.7
new_params = cubic.get_params_snapshot()
- self.assertEqual(
- new_params,
- throttling.CubicParams(w_max=rate_when_throttled,
+ assert new_params == throttling.CubicParams(w_max=rate_when_throttled,
k=1.8171205928321397,
last_fail=1)
- )
def test_t_0_should_match_beta_decrease(self):
# So if I have beta of 0.6
cubic = self.create_cubic_calculator(starting_max_rate=10, beta=0.6)
# When I get throttled I should decrease my rate by 60%.
new_rate = cubic.error_received(current_rate=10, timestamp=1)
- self.assertEqual(new_rate, 6.0)
+ assert new_rate == 6.0
# And my starting rate at time t=1 should start at that new rate.
- self.assertAlmostEqual(cubic.success_received(timestamp=1), 6.0)
+ assert pytest.approx(cubic.success_received(timestamp=1)) == 6.0
--
2.29.2
From 99dd93c1547475d0e42a52574dfab7c18e2dbb9f Mon Sep 17 00:00:00 2001
From: Zidaan Dutta <ziddutta@amazon.com>
Date: Mon, 19 Oct 2020 19:26:04 -0400
Subject: [PATCH 09/14] pytest migration of botocore unit tests part2
---
tests/unit/retries/test_adaptive.py | 29 +-
tests/unit/retries/test_special.py | 14 +-
tests/unit/test_args.py | 87 +--
tests/unit/test_auth_sigv4.py | 2 +-
tests/unit/test_awsrequest.py | 166 ++---
tests/unit/test_client.py | 438 +++++------
tests/unit/test_compat.py | 52 +-
tests/unit/test_config_provider.py | 61 +-
tests/unit/test_configloader.py | 5 +-
tests/unit/test_credentials.py | 666 +++++++++--------
tests/unit/test_discovery.py | 86 +--
tests/unit/test_endpoint.py | 57 +-
tests/unit/test_errorfactory.py | 34 +-
tests/unit/test_exceptions.py | 49 +-
tests/unit/test_handlers.py | 333 ++++-----
tests/unit/test_history.py | 4 +-
tests/unit/test_hooks.py | 139 ++--
.../test_http_client_exception_mapping.py | 13 +-
tests/unit/test_http_session.py | 29 +-
tests/unit/test_idempotency.py | 6 +-
tests/unit/test_loaders.py | 93 ++-
tests/unit/test_model.py | 284 ++++---
tests/unit/test_monitoring.py | 244 +++---
tests/unit/test_paginate.py | 407 +++++-----
tests/unit/test_parsers.py | 281 ++++---
tests/unit/test_regions.py | 62 +-
tests/unit/test_response.py | 49 +-
tests/unit/test_retryhandler.py | 88 ++-
tests/unit/test_s3_addressing.py | 62 +-
tests/unit/test_serialize.py | 55 +-
tests/unit/test_session.py | 222 +++---
tests/unit/test_session_legacy.py | 259 +++----
tests/unit/test_signers.py | 153 ++--
tests/unit/test_stub.py | 32 +-
tests/unit/test_translate.py | 26 +-
tests/unit/test_utils.py | 700 ++++++++----------
tests/unit/test_validate.py | 30 +-
tests/unit/test_waiters.py | 145 ++--
38 files changed, 2528 insertions(+), 2934 deletions(-)
diff --git a/tests/unit/retries/test_adaptive.py b/tests/unit/retries/test_adaptive.py
index 5ec18eb4..76bd4164 100644
--- a/tests/unit/retries/test_adaptive.py
+++ b/tests/unit/retries/test_adaptive.py
@@ -1,5 +1,6 @@
from tests import unittest
+import pytest
from tests import mock
from botocore.retries import adaptive
@@ -52,7 +53,7 @@ class TestClientRateLimiter(unittest.TestCase):
def test_bucket_bucket_acquisition_only_if_enabled(self):
rate_limiter = self.create_client_limiter()
rate_limiter.on_sending_request(request=mock.sentinel.request)
- self.assertFalse(self.token_bucket.acquire.called)
+ assert not self.token_bucket.acquire.called
def test_token_bucket_enabled_on_throttling_error(self):
rate_limiter = self.create_client_limiter()
@@ -64,7 +65,7 @@ class TestClientRateLimiter(unittest.TestCase):
# token.
self.timestamp_sequences.append(1)
rate_limiter.on_sending_request(request=mock.sentinel.request)
- self.assertTrue(self.token_bucket.acquire.called)
+ assert self.token_bucket.acquire.called
def test_max_rate_updated_on_success_response(self):
rate_limiter = self.create_client_limiter()
@@ -72,7 +73,7 @@ class TestClientRateLimiter(unittest.TestCase):
self.rate_adjustor.success_received.return_value = 20
self.rate_clocker.record.return_value = 21
rate_limiter.on_receiving_response()
- self.assertEqual(self.token_bucket.max_rate, 20)
+ assert self.token_bucket.max_rate == 20
def test_max_rate_cant_exceed_20_percent_max(self):
rate_limiter = self.create_client_limiter()
@@ -84,7 +85,7 @@ class TestClientRateLimiter(unittest.TestCase):
# The most we should go up is 2.0 * 20
rate_limiter.on_receiving_response()
- self.assertEqual(self.token_bucket.max_rate, 2.0 * 20)
+ assert self.token_bucket.max_rate == 2.0 * 20
class TestRateClocker(unittest.TestCase):
@@ -95,19 +96,19 @@ class TestRateClocker(unittest.TestCase):
self.smoothing = 0.8
def test_initial_rate_is_0(self):
- self.assertEqual(self.rate_measure.measured_rate, 0)
+ assert self.rate_measure.measured_rate == 0
def test_time_updates_if_after_bucket_range(self):
self.timestamp_sequences.append(1)
# This should be 1 * 0.8 + 0 * 0.2, or just 0.8
- self.assertEqual(self.rate_measure.record(), 0.8)
+ assert self.rate_measure.record() == 0.8
def test_can_measure_constant_rate(self):
# Timestamps of 1 every second indicate a rate of 1 TPS.
self.timestamp_sequences.extend(range(1, 21))
for _ in range(20):
self.rate_measure.record()
- self.assertAlmostEqual(self.rate_measure.measured_rate, 1)
+ assert pytest.approx(self.rate_measure.measured_rate) == 1
def test_uses_smoothing_to_favor_recent_weights(self):
self.timestamp_sequences.extend([
@@ -125,13 +126,13 @@ class TestRateClocker(unittest.TestCase):
for _ in range(7):
self.rate_measure.record()
# We should almost be at 2.0 but not quite.
- self.assertGreaterEqual(self.rate_measure.measured_rate, 1.99)
- self.assertLessEqual(self.rate_measure.measured_rate, 2.0)
+ assert self.rate_measure.measured_rate >= 1.99
+ assert self.rate_measure.measured_rate <= 2.0
# With our last recording we now drop down between 0.1 and 2
# depending on our smoothing factor.
self.rate_measure.record()
- self.assertGreaterEqual(self.rate_measure.measured_rate, 0.1)
- self.assertLessEqual(self.rate_measure.measured_rate, 2.0)
+ assert self.rate_measure.measured_rate >= 0.1
+ assert self.rate_measure.measured_rate <= 2.0
def test_noop_when_delta_t_is_0(self):
self.timestamp_sequences.extend([
@@ -143,7 +144,7 @@ class TestRateClocker(unittest.TestCase):
])
for _ in range(5):
self.rate_measure.record()
- self.assertGreaterEqual(self.rate_measure.measured_rate, 1.0)
+ assert self.rate_measure.measured_rate >= 1.0
def test_times_are_grouped_per_time_bucket(self):
# Using our default of 0.5 time buckets, we have:
@@ -159,7 +160,7 @@ class TestRateClocker(unittest.TestCase):
# This is showing the tradeoff we're making with measuring rates.
# we're currently in the window from 0 <= x < 0.5, which means
# we use the rate from the previous bucket, which is 0:
- self.assertEqual(self.rate_measure.measured_rate, 0)
+ assert self.rate_measure.measured_rate == 0
# However if we now add a new measurement that's in the next
# time bucket 0.5 <= x < 1.0
# we'll use the range from the previous bucket:
@@ -167,4 +168,4 @@ class TestRateClocker(unittest.TestCase):
self.rate_measure.record()
# And our previous bucket will be:
# 12 * 0.8 + 0.2 * 0
- self.assertEqual(self.rate_measure.measured_rate, 12 * 0.8)
+ assert self.rate_measure.measured_rate == 12 * 0.8
diff --git a/tests/unit/retries/test_special.py b/tests/unit/retries/test_special.py
index f643b902..0e36dedb 100644
--- a/tests/unit/retries/test_special.py
+++ b/tests/unit/retries/test_special.py
@@ -26,7 +26,7 @@ class TestRetryIDPCommunicationError(unittest.TestCase):
status_code=400, raw=None, headers={},
url='https://foo'),
caught_exception=None, )
- self.assertFalse(self.checker.is_retryable(context))
+ assert not self.checker.is_retryable(context)
def test_can_retry_idp_communication_error(self):
context = standard.RetryContext(
@@ -38,7 +38,7 @@ class TestRetryIDPCommunicationError(unittest.TestCase):
status_code=400, raw=None, headers={},
url='https://foo'),
caught_exception=None, )
- self.assertTrue(self.checker.is_retryable(context))
+ assert self.checker.is_retryable(context) is True
def test_not_idp_communication_error(self):
context = standard.RetryContext(
@@ -50,7 +50,7 @@ class TestRetryIDPCommunicationError(unittest.TestCase):
status_code=400, raw=None, headers={},
url='https://foo'),
caught_exception=None, )
- self.assertFalse(self.checker.is_retryable(context))
+ assert not self.checker.is_retryable(context)
class TestRetryDDBChecksumError(unittest.TestCase):
@@ -74,7 +74,7 @@ class TestRetryDDBChecksumError(unittest.TestCase):
url='https://foo'),
caught_exception=None,
)
- self.assertFalse(self.checker.is_retryable(context))
+ assert not self.checker.is_retryable(context)
def test_checksum_matches(self):
context = standard.RetryContext(
@@ -88,7 +88,7 @@ class TestRetryDDBChecksumError(unittest.TestCase):
url='https://foo'),
caught_exception=None
)
- self.assertFalse(self.checker.is_retryable(context))
+ assert not self.checker.is_retryable(context)
def test_checksum_not_matches(self):
context = standard.RetryContext(
@@ -102,7 +102,7 @@ class TestRetryDDBChecksumError(unittest.TestCase):
url='https://foo'),
caught_exception=None
)
- self.assertTrue(self.checker.is_retryable(context))
+ assert self.checker.is_retryable(context) is True
def test_checksum_check_only_for_dynamodb(self):
context = standard.RetryContext(
@@ -116,4 +116,4 @@ class TestRetryDDBChecksumError(unittest.TestCase):
url='https://foo'),
caught_exception=None
)
- self.assertFalse(self.checker.is_retryable(context))
+ assert not self.checker.is_retryable(context)
diff --git a/tests/unit/test_args.py b/tests/unit/test_args.py
index e2427a02..6f8f36a4 100644
--- a/tests/unit/test_args.py
+++ b/tests/unit/test_args.py
@@ -12,6 +12,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import socket
+import pytest
import botocore.config
from tests import unittest
@@ -100,34 +101,27 @@ class TestCreateClientArgs(unittest.TestCase):
)
def test_compute_s3_configuration(self):
- self.assertIsNone(self.args_create.compute_s3_config(None))
+ assert self.args_create.compute_s3_config(None) is None
def test_compute_s3_config_only_config_store(self):
self.config_store.set_config_variable(
's3', {'use_accelerate_endpoint': True})
- self.assertEqual(
- self.args_create.compute_s3_config(None),
- {'use_accelerate_endpoint': True}
- )
+ assert self.args_create.compute_s3_config(None) == {
+ 'use_accelerate_endpoint': True}
def test_client_s3_accelerate_from_client_config(self):
- self.assertEqual(
- self.args_create.compute_s3_config(
+ assert self.args_create.compute_s3_config(
client_config=Config(s3={'use_accelerate_endpoint': True})
- ),
- {'use_accelerate_endpoint': True}
- )
+ ) == {
+ 'use_accelerate_endpoint': True}
def test_client_s3_accelerate_client_config_overrides_config_store(self):
self.config_store.set_config_variable(
's3', {'use_accelerate_endpoint': False})
- self.assertEqual(
- self.args_create.compute_s3_config(
+ # client_config beats scoped_config
+ assert self.args_create.compute_s3_config(
client_config=Config(s3={'use_accelerate_endpoint': True})
- ),
- # client_config beats scoped_config
- {'use_accelerate_endpoint': True}
- )
+ ) == {'use_accelerate_endpoint': True}
def test_max_pool_from_client_config_forwarded_to_endpoint_creator(self):
config = botocore.config.Config(max_pool_connections=20)
@@ -161,8 +155,7 @@ class TestCreateClientArgs(unittest.TestCase):
]
client_args = self.call_get_client_args(
endpoint_url='http://other.com/')
- self.assertEqual(
- client_args['client_config'].region_name, 'us-west-2')
+ assert client_args['client_config'].region_name == 'us-west-2'
def test_region_does_not_resolve_if_not_s3_and_endpoint_url_provided(self):
self.service_model.endpoint_prefix = 'ec2'
@@ -174,7 +167,7 @@ class TestCreateClientArgs(unittest.TestCase):
}]
client_args = self.call_get_client_args(
endpoint_url='http://other.com/')
- self.assertEqual(client_args['client_config'].region_name, None)
+ assert client_args['client_config'].region_name is None
def test_tcp_keepalive_enabled(self):
scoped_config = {'tcp_keepalive': 'true'}
@@ -217,10 +210,8 @@ class TestCreateClientArgs(unittest.TestCase):
service_model=self._get_service_model('sts'),
region_name='us-west-2', endpoint_url=None
)
- self.assertEqual(
- client_args['endpoint'].host, 'https://sts.amazonaws.com')
- self.assertEqual(
- client_args['request_signer'].region_name, 'us-east-1')
+ assert client_args['endpoint'].host == 'https://sts.amazonaws.com'
+ assert client_args['request_signer'].region_name == 'us-east-1'
def test_sts_use_resolved_endpoint_for_nonlegacy_region(self):
resolved_endpoint = 'https://resolved-endpoint'
@@ -235,9 +226,8 @@ class TestCreateClientArgs(unittest.TestCase):
service_model=self._get_service_model('sts'),
region_name='ap-east-1', endpoint_url=None
)
- self.assertEqual(client_args['endpoint'].host, resolved_endpoint)
- self.assertEqual(
- client_args['request_signer'].region_name, resolved_region)
+ assert client_args['endpoint'].host == resolved_endpoint
+ assert client_args['request_signer'].region_name == resolved_region
def test_sts_use_resolved_endpoint_for_regional_configuration(self):
resolved_endpoint = 'https://resolved-endpoint'
@@ -252,9 +242,8 @@ class TestCreateClientArgs(unittest.TestCase):
service_model=self._get_service_model('sts'),
region_name='us-west-2', endpoint_url=None
)
- self.assertEqual(client_args['endpoint'].host, resolved_endpoint)
- self.assertEqual(
- client_args['request_signer'].region_name, resolved_region)
+ assert client_args['endpoint'].host == resolved_endpoint
+ assert client_args['request_signer'].region_name == resolved_region
def test_sts_with_endpoint_override_and_legacy_configured(self):
override_endpoint = 'https://override-endpoint'
@@ -265,7 +254,7 @@ class TestCreateClientArgs(unittest.TestCase):
service_model=self._get_service_model('sts'),
region_name='us-west-2', endpoint_url=override_endpoint
)
- self.assertEqual(client_args['endpoint'].host, override_endpoint)
+ assert client_args['endpoint'].host == override_endpoint
def test_sts_http_scheme_for_override_endpoint(self):
self.config_store.set_config_variable(
@@ -275,8 +264,7 @@ class TestCreateClientArgs(unittest.TestCase):
region_name='us-west-2', endpoint_url=None, is_secure=False,
)
- self.assertEqual(
- client_args['endpoint'].host, 'http://sts.amazonaws.com')
+ assert client_args['endpoint'].host == 'http://sts.amazonaws.com'
def test_sts_regional_endpoints_defaults_to_legacy_if_not_set(self):
self.config_store.set_config_variable(
@@ -285,15 +273,13 @@ class TestCreateClientArgs(unittest.TestCase):
service_model=self._get_service_model('sts'),
region_name='us-west-2', endpoint_url=None
)
- self.assertEqual(
- client_args['endpoint'].host, 'https://sts.amazonaws.com')
- self.assertEqual(
- client_args['request_signer'].region_name, 'us-east-1')
+ assert client_args['endpoint'].host == 'https://sts.amazonaws.com'
+ assert client_args['request_signer'].region_name == 'us-east-1'
def test_invalid_sts_regional_endpoints(self):
self.config_store.set_config_variable(
'sts_regional_endpoints', 'invalid')
- with self.assertRaises(
+ with pytest.raises(
exceptions.InvalidSTSRegionalEndpointsConfigError):
self.call_get_client_args(
service_model=self._get_service_model('sts'),
@@ -303,54 +289,51 @@ class TestCreateClientArgs(unittest.TestCase):
def test_provides_total_max_attempts(self):
config = botocore.config.Config(retries={'total_max_attempts': 10})
client_args = self.call_get_client_args(client_config=config)
- self.assertEqual(
- client_args['client_config'].retries['total_max_attempts'], 10)
+ assert client_args['client_config'].retries['total_max_attempts'] == 10
def test_provides_total_max_attempts_has_precedence(self):
config = botocore.config.Config(retries={'total_max_attempts': 10,
'max_attempts': 5})
client_args = self.call_get_client_args(client_config=config)
- self.assertEqual(
- client_args['client_config'].retries['total_max_attempts'], 10)
- self.assertNotIn('max_attempts', client_args['client_config'].retries)
+ assert client_args['client_config'].retries['total_max_attempts'] == 10
+ assert 'max_attempts' not in client_args['client_config'].retries
def test_provide_retry_config_maps_total_max_attempts(self):
config = botocore.config.Config(retries={'max_attempts': 10})
client_args = self.call_get_client_args(client_config=config)
- self.assertEqual(
- client_args['client_config'].retries['total_max_attempts'], 11)
- self.assertNotIn('max_attempts', client_args['client_config'].retries)
+ assert client_args['client_config'].retries['total_max_attempts'] == 11
+ assert 'max_attempts' not in client_args['client_config'].retries
def test_can_merge_max_attempts(self):
self.config_store.set_config_variable('max_attempts', 4)
config = self.call_get_client_args()['client_config']
- self.assertEqual(config.retries['total_max_attempts'], 4)
+ assert config.retries['total_max_attempts'] == 4
def test_uses_config_value_if_present_for_max_attempts(self):
config = self.call_get_client_args(
client_config=Config(retries={'max_attempts': 2})
)['client_config']
- self.assertEqual(config.retries['total_max_attempts'], 3)
+ assert config.retries['total_max_attempts'] == 3
def test_uses_client_config_over_config_store_max_attempts(self):
self.config_store.set_config_variable('max_attempts', 4)
config = self.call_get_client_args(
client_config=Config(retries={'max_attempts': 2})
)['client_config']
- self.assertEqual(config.retries['total_max_attempts'], 3)
+ assert config.retries['total_max_attempts'] == 3
def test_uses_client_config_total_over_config_store_max_attempts(self):
self.config_store.set_config_variable('max_attempts', 4)
config = self.call_get_client_args(
client_config=Config(retries={'total_max_attempts': 2})
)['client_config']
- self.assertEqual(config.retries['total_max_attempts'], 2)
+ assert config.retries['total_max_attempts'] == 2
def test_max_attempts_unset_if_retries_is_none(self):
config = self.call_get_client_args(
client_config=Config(retries=None)
)['client_config']
- self.assertEqual(config.retries, {'mode': 'legacy'})
+ assert config.retries == {'mode': 'legacy'}
def test_retry_mode_set_on_config_store(self):
self.config_store.set_config_variable('retry_mode', 'standard')
@@ -361,11 +344,11 @@ class TestCreateClientArgs(unittest.TestCase):
config = self.call_get_client_args(
client_config=Config(retries={'mode': 'standard'})
)['client_config']
- self.assertEqual(config.retries['mode'], 'standard')
+ assert config.retries['mode'] == 'standard'
def test_client_config_beats_config_store(self):
self.config_store.set_config_variable('retry_mode', 'adaptive')
config = self.call_get_client_args(
client_config=Config(retries={'mode': 'standard'})
)['client_config']
- self.assertEqual(config.retries['mode'], 'standard')
+ assert config.retries['mode'] == 'standard'
diff --git a/tests/unit/test_auth_sigv4.py b/tests/unit/test_auth_sigv4.py
index 7c253ee9..058809c2 100644
--- a/tests/unit/test_auth_sigv4.py
+++ b/tests/unit/test_auth_sigv4.py
@@ -30,4 +30,4 @@ class TestSigV4Auth(unittest.TestCase):
expected_host = 's5.us-weast-2.amazonaws.com'
request = AWSRequest(method='GET', url=endpoint)
headers_to_sign = self.sigv4.headers_to_sign(request)
- self.assertEqual(expected_host, headers_to_sign.get('host'))
+ assert expected_host == headers_to_sign.get('host')
diff --git a/tests/unit/test_awsrequest.py b/tests/unit/test_awsrequest.py
index 69642e99..437ccdb0 100644
--- a/tests/unit/test_awsrequest.py
+++ b/tests/unit/test_awsrequest.py
@@ -18,6 +18,7 @@ import tempfile
import shutil
import io
import socket
+import pytest
from tests import mock
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool
@@ -114,43 +115,43 @@ class TestAWSRequest(unittest.TestCase):
'url=http://example.com, headers={}>'
)
request_repr = repr(self.prepared_request)
- self.assertEqual(request_repr, expected_repr)
+ assert request_repr == expected_repr
def test_can_prepare_url_params(self):
request = AWSRequest(url='http://example.com/', params={'foo': 'bar'})
prepared_request = request.prepare()
- self.assertEqual(prepared_request.url, 'http://example.com/?foo=bar')
+ assert prepared_request.url == 'http://example.com/?foo=bar'
def test_can_prepare_dict_body(self):
body = {'dead': 'beef'}
request = AWSRequest(url='http://example.com/', data=body)
prepared_request = request.prepare()
- self.assertEqual(prepared_request.body, 'dead=beef')
+ assert prepared_request.body == 'dead=beef'
def test_can_prepare_dict_body_unicode_values(self):
body = {'Text': u'\u30c6\u30b9\u30c8 string'}
expected_body = 'Text=%E3%83%86%E3%82%B9%E3%83%88+string'
request = AWSRequest(url='http://example.com/', data=body)
prepared_request = request.prepare()
- self.assertEqual(prepared_request.body, expected_body)
+ assert prepared_request.body == expected_body
def test_can_prepare_dict_body_unicode_keys(self):
body = {u'\u30c6\u30b9\u30c8': 'string'}
expected_body = '%E3%83%86%E3%82%B9%E3%83%88=string'
request = AWSRequest(url='http://example.com/', data=body)
prepared_request = request.prepare()
- self.assertEqual(prepared_request.body, expected_body)
+ assert prepared_request.body == expected_body
def test_can_prepare_empty_body(self):
request = AWSRequest(url='http://example.com/', data=b'')
prepared_request = request.prepare()
- self.assertEqual(prepared_request.body, None)
+ assert prepared_request.body is None
content_length = prepared_request.headers.get('content-length')
- self.assertEqual(content_length, '0')
+ assert content_length == '0'
def test_request_body_is_prepared(self):
request = AWSRequest(url='http://example.com/', data='body')
- self.assertEqual(request.body, b'body')
+ assert request.body == b'body'
def test_prepare_body_content_adds_content_length(self):
content = b'foobarbaz'
@@ -163,46 +164,46 @@ class TestAWSRequest(unittest.TestCase):
self.request.method = 'POST'
prepared_request = self.request.prepare()
calculated_len = prepared_request.headers['Content-Length']
- self.assertEqual(calculated_len, expected_len)
+ assert calculated_len == expected_len
def test_prepare_body_doesnt_override_content_length(self):
self.request.method = 'PUT'
self.request.headers['Content-Length'] = '20'
self.request.data = b'asdf'
prepared_request = self.request.prepare()
- self.assertEqual(prepared_request.headers['Content-Length'], '20')
+ assert prepared_request.headers['Content-Length'] == '20'
def test_prepare_body_doesnt_set_content_length_head(self):
self.request.method = 'HEAD'
self.request.data = b'thisshouldntbehere'
prepared_request = self.request.prepare()
- self.assertEqual(prepared_request.headers.get('Content-Length'), None)
+ assert prepared_request.headers.get('Content-Length') is None
def test_prepare_body_doesnt_set_content_length_get(self):
self.request.method = 'GET'
self.request.data = b'thisshouldntbehere'
prepared_request = self.request.prepare()
- self.assertEqual(prepared_request.headers.get('Content-Length'), None)
+ assert prepared_request.headers.get('Content-Length') is None
def test_prepare_body_doesnt_set_content_length_options(self):
self.request.method = 'OPTIONS'
self.request.data = b'thisshouldntbehere'
prepared_request = self.request.prepare()
- self.assertEqual(prepared_request.headers.get('Content-Length'), None)
+ assert prepared_request.headers.get('Content-Length') is None
def test_can_reset_stream_handles_binary(self):
contents = b'notastream'
self.prepared_request.body = contents
self.prepared_request.reset_stream()
# assert the request body doesn't change after reset_stream is called
- self.assertEqual(self.prepared_request.body, contents)
+ assert self.prepared_request.body == contents
def test_can_reset_stream_handles_bytearray(self):
contents = bytearray(b'notastream')
self.prepared_request.body = contents
self.prepared_request.reset_stream()
# assert the request body doesn't change after reset_stream is called
- self.assertEqual(self.prepared_request.body, contents)
+ assert self.prepared_request.body == contents
def test_can_reset_stream(self):
contents = b'foobarbaz'
@@ -212,11 +213,11 @@ class TestAWSRequest(unittest.TestCase):
self.prepared_request.body = body
# pretend the request body was partially sent
body.read()
- self.assertNotEqual(body.tell(), 0)
+ assert body.tell() != 0
# have the prepared request reset its stream
self.prepared_request.reset_stream()
# the stream should be reset
- self.assertEqual(body.tell(), 0)
+ assert body.tell() == 0
def test_cannot_reset_stream_raises_error(self):
contents = b'foobarbaz'
@@ -226,9 +227,9 @@ class TestAWSRequest(unittest.TestCase):
self.prepared_request.body = Unseekable(body)
# pretend the request body was partially sent
body.read()
- self.assertNotEqual(body.tell(), 0)
+ assert body.tell() != 0
# reset stream should fail
- with self.assertRaises(UnseekableStreamError):
+ with pytest.raises(UnseekableStreamError):
self.prepared_request.reset_stream()
def test_duck_type_for_file_check(self):
@@ -247,7 +248,7 @@ class TestAWSRequest(unittest.TestCase):
self.prepared_request.body = looks_like_file
self.prepared_request.reset_stream()
# The stream should now be reset.
- self.assertTrue(looks_like_file.seek_called)
+ assert looks_like_file.seek_called
class TestAWSResponse(unittest.TestCase):
@@ -263,19 +264,19 @@ class TestAWSResponse(unittest.TestCase):
def test_content_property(self):
self.set_raw_stream([b'some', b'data'])
- self.assertEqual(self.response.content, b'somedata')
- self.assertEqual(self.response.content, b'somedata')
+ assert self.response.content == b'somedata'
+ assert self.response.content == b'somedata'
# assert that stream was not called more than once
- self.assertEqual(self.response.raw.stream.call_count, 1)
+ assert self.response.raw.stream.call_count == 1
def test_text_property(self):
self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0'])
self.response.headers['content-type'] = 'text/plain; charset=utf-8'
- self.assertEqual(self.response.text, u'\u30b8\u30e7\u30b0')
+ assert self.response.text == u'\u30b8\u30e7\u30b0'
def test_text_property_defaults_utf8(self):
self.set_raw_stream([b'\xe3\x82\xb8\xe3\x83\xa7\xe3\x82\xb0'])
- self.assertEqual(self.response.text, u'\u30b8\u30e7\u30b0')
+ assert self.response.text == u'\u30b8\u30e7\u30b0'
class TestAWSHTTPConnection(unittest.TestCase):
@@ -328,9 +329,9 @@ class TestAWSHTTPConnection(unittest.TestCase):
{'Expect': b'100-continue'})
response = conn.getresponse()
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
# Now we should verify that our final response is the 200 OK
- self.assertEqual(response.status, 200)
+ assert response.status == 200
def test_handles_expect_100_with_different_reason_phrase(self):
with mock.patch('urllib3.util.wait_for_read') as wait_mock:
@@ -344,12 +345,12 @@ class TestAWSHTTPConnection(unittest.TestCase):
{'Expect': b'100-continue', 'Content-Length': b'4'})
response = conn.getresponse()
# Now we should verify that our final response is the 200 OK.
- self.assertEqual(response.status, 200)
+ assert response.status == 200
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
# Verify that we went the request body because we got a 100
# continue.
- self.assertIn(b'body', s.sent_data)
+ assert b'body' in s.sent_data
def test_expect_100_sends_connection_header(self):
# When using squid as an HTTP proxy, it will also send
@@ -371,9 +372,9 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request('GET', '/bucket/foo', b'body',
{'Expect': b'100-continue'})
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
response = conn.getresponse()
- self.assertEqual(response.status, 500)
+ assert response.status == 500
def test_expect_100_continue_sends_307(self):
# This is the case where we send a 100 continue and the server
@@ -390,10 +391,10 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request('GET', '/bucket/foo', b'body',
{'Expect': b'100-continue'})
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
response = conn.getresponse()
# Now we should verify that our final response is the 307.
- self.assertEqual(response.status, 307)
+ assert response.status == 307
def test_expect_100_continue_no_response_from_server(self):
with mock.patch('urllib3.util.wait_for_read') as wait_mock:
@@ -411,9 +412,9 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request('GET', '/bucket/foo', b'body',
{'Expect': b'100-continue'})
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
response = conn.getresponse()
- self.assertEqual(response.status, 307)
+ assert response.status == 307
def test_message_body_is_file_like_object(self):
# Shows the server first sending a 100 continue response
@@ -424,7 +425,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.sock = s
conn.request('GET', '/bucket/foo', body)
response = conn.getresponse()
- self.assertEqual(response.status, 200)
+ assert response.status == 200
def test_no_expect_header_set(self):
# Shows the server first sending a 100 continue response
@@ -434,7 +435,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.sock = s
conn.request('GET', '/bucket/foo', b'body')
response = conn.getresponse()
- self.assertEqual(response.status, 200)
+ assert response.status == 200
def test_tunnel_readline_none_bugfix(self):
# Tests whether ``_tunnel`` function is able to work around the
@@ -446,7 +447,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
)
conn._tunnel()
# Ensure proper amount of readline calls were made.
- self.assertEqual(self.mock_response.fp.readline.call_count, 2)
+ assert self.mock_response.fp.readline.call_count == 2
def test_tunnel_readline_normal(self):
# Tests that ``_tunnel`` function behaves normally when it comes
@@ -458,7 +459,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
)
conn._tunnel()
# Ensure proper amount of readline calls were made.
- self.assertEqual(self.mock_response.fp.readline.call_count, 2)
+ assert self.mock_response.fp.readline.call_count == 2
def test_tunnel_raises_socket_error(self):
# Tests that ``_tunnel`` function throws appropriate error when
@@ -468,7 +469,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
port=443,
response=b'HTTP/1.1 404 Not Found\r\n\r\n',
)
- with self.assertRaises(socket.error):
+ with pytest.raises(socket.error):
conn._tunnel()
def test_tunnel_uses_std_lib(self):
@@ -479,7 +480,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
# the ``_tunnel`` method and seeing if the std lib method was called.
with mock.patch('urllib3.connection.HTTPConnection._tunnel') as mock_tunnel:
conn._tunnel()
- self.assertTrue(mock_tunnel.called)
+ assert mock_tunnel.called
def test_encodes_unicode_method_line(self):
s = FakeSocket(b'HTTP/1.1 200 OK\r\n')
@@ -490,7 +491,7 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request(u'GET', '/bucket/foo', b'body',
headers={"Utf8-Header": b"\xe5\xb0\x8f"})
response = conn.getresponse()
- self.assertEqual(response.status, 200)
+ assert response.status == 200
def test_state_reset_on_connection_close(self):
# This simulates what urllib3 does with connections
@@ -506,9 +507,9 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request('GET', '/bucket/foo', b'body',
{'Expect': b'100-continue'})
- self.assertEqual(wait_mock.call_count, 1)
+ assert wait_mock.call_count == 1
response = conn.getresponse()
- self.assertEqual(response.status, 500)
+ assert response.status == 500
# Now what happens in urllib3 is that when the next
# request comes along and this conection gets checked
@@ -528,20 +529,20 @@ class TestAWSHTTPConnection(unittest.TestCase):
conn.request('GET', '/bucket/foo', b'body',
{'Expect': b'100-continue'})
# Assert that we waited for the 100-continue response
- self.assertEqual(wait_mock.call_count, 2)
+ assert wait_mock.call_count == 2
response = conn.getresponse()
# This should be 200. If it's a 500 then
# the prior response was leaking into our
# current response.,
- self.assertEqual(response.status, 200)
+ assert response.status == 200
class TestAWSHTTPConnectionPool(unittest.TestCase):
def test_global_urllib3_pool_is_unchanged(self):
http_connection_class = HTTPConnectionPool.ConnectionCls
- self.assertIsNot(http_connection_class, AWSHTTPConnection)
+ assert http_connection_class is not AWSHTTPConnection
https_connection_class = HTTPSConnectionPool.ConnectionCls
- self.assertIsNot(https_connection_class, AWSHTTPSConnection)
+ assert https_connection_class is not AWSHTTPSConnection
class TestPrepareRequestDict(unittest.TestCase):
@@ -575,11 +576,10 @@ class TestPrepareRequestDict(unittest.TestCase):
}
self.prepare_base_request_dict(
request_dict, endpoint_url='https://s3.amazonaws.com')
- self.assertEqual(self.base_request_dict['method'], 'GET')
- self.assertEqual(self.base_request_dict['url'],
- 'https://s3.amazonaws.com/')
- self.assertEqual(self.base_request_dict['headers']['User-Agent'],
- self.user_agent)
+ assert self.base_request_dict['method'] == 'GET'
+ assert self.base_request_dict['url'] == 'https://s3.amazonaws.com/'
+ user_agent_header = self.base_request_dict['headers']['User-Agent']
+ assert user_agent_header == self.user_agent
def test_prepare_request_dict_for_get_no_user_agent(self):
self.user_agent = None
@@ -589,12 +589,12 @@ class TestPrepareRequestDict(unittest.TestCase):
}
self.prepare_base_request_dict(
request_dict, endpoint_url='https://s3.amazonaws.com')
- self.assertNotIn('User-Agent', self.base_request_dict['headers'])
+ assert 'User-Agent' not in self.base_request_dict['headers']
def test_prepare_request_dict_with_context(self):
context = {'foo': 'bar'}
self.prepare_base_request_dict({}, context=context)
- self.assertEqual(self.base_request_dict['context'], context)
+ assert self.base_request_dict['context'] == context
def test_query_string_serialized_to_url(self):
request_dict = {
@@ -603,9 +603,8 @@ class TestPrepareRequestDict(unittest.TestCase):
'url_path': u'/mybucket'
}
self.prepare_base_request_dict(request_dict)
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://s3.amazonaws.com/mybucket?prefix=foo')
+ expect_url = 'https://s3.amazonaws.com/mybucket?prefix=foo'
+ assert self.base_request_dict['url'] == expect_url
def test_url_path_combined_with_endpoint_url(self):
# This checks the case where a user specifies and
@@ -618,45 +617,40 @@ class TestPrepareRequestDict(unittest.TestCase):
}
endpoint_url = 'https://custom.endpoint/foo/bar'
self.prepare_base_request_dict(request_dict, endpoint_url)
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://custom.endpoint/foo/bar/mybucket?prefix=foo')
+ expected_url = 'https://custom.endpoint/foo/bar/mybucket?prefix=foo'
+ assert self.base_request_dict['url'] == expected_url
def test_url_path_with_trailing_slash(self):
self.prepare_base_request_dict(
{'url_path': u'/mybucket'},
endpoint_url='https://custom.endpoint/foo/bar/')
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://custom.endpoint/foo/bar/mybucket')
+ expected_url = 'https://custom.endpoint/foo/bar/mybucket'
+ assert self.base_request_dict['url'] == expected_url
def test_url_path_is_slash(self):
self.prepare_base_request_dict(
{'url_path': u'/'},
endpoint_url='https://custom.endpoint/foo/bar/')
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://custom.endpoint/foo/bar/')
+ expected_url = 'https://custom.endpoint/foo/bar/'
+ assert self.base_request_dict['url'] == expected_url
def test_url_path_is_slash_with_endpoint_url_no_slash(self):
self.prepare_base_request_dict(
{'url_path': u'/'},
endpoint_url='https://custom.endpoint/foo/bar')
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://custom.endpoint/foo/bar')
+ expected_url = 'https://custom.endpoint/foo/bar'
+ assert self.base_request_dict['url'] == expected_url
def test_custom_endpoint_with_query_string(self):
self.prepare_base_request_dict(
{'url_path': u'/baz', 'query_string': {'x': 'y'}},
endpoint_url='https://custom.endpoint/foo/bar?foo=bar')
- self.assertEqual(
- self.base_request_dict['url'],
- 'https://custom.endpoint/foo/bar/baz?foo=bar&x=y')
+ expected_url = 'https://custom.endpoint/foo/bar/baz?foo=bar&x=y'
+ assert self.base_request_dict['url'] == expected_url
class TestCreateRequestObject(unittest.TestCase):
@@ -673,11 +667,11 @@ class TestCreateRequestObject(unittest.TestCase):
def test_create_request_object(self):
request = create_request_object(self.request_dict)
- self.assertEqual(request.method, self.request_dict['method'])
- self.assertEqual(request.url, self.request_dict['url'])
- self.assertEqual(request.data, self.request_dict['body'])
- self.assertEqual(request.context, self.request_dict['context'])
- self.assertIn('User-Agent', request.headers)
+ assert request.method == self.request_dict['method']
+ assert request.url == self.request_dict['url']
+ assert request.data == self.request_dict['body']
+ assert request.context == self.request_dict['context']
+ assert 'User-Agent' in request.headers
class TestHeadersDict(unittest.TestCase):
@@ -686,7 +680,7 @@ class TestHeadersDict(unittest.TestCase):
def test_get_insensitive(self):
self.headers['foo'] = 'bar'
- self.assertEqual(self.headers['FOO'], 'bar')
+ assert self.headers['FOO'] == 'bar'
def test_set_insensitive(self):
self.headers['foo'] = 'bar'
@@ -695,19 +689,19 @@ class TestHeadersDict(unittest.TestCase):
def test_del_insensitive(self):
self.headers['foo'] = 'bar'
- self.assertEqual(self.headers['FOO'], 'bar')
+ assert self.headers['FOO'] == 'bar'
del self.headers['FoO']
- with self.assertRaises(KeyError):
+ with pytest.raises(KeyError):
self.headers['foo']
def test_iteration(self):
self.headers['FOO'] = 'bar'
self.headers['dead'] = 'beef'
- self.assertIn('FOO', list(self.headers))
- self.assertIn('dead', list(self.headers))
+ assert 'FOO' in list(self.headers)
+ assert 'dead' in list(self.headers)
headers_items = list(self.headers.items())
- self.assertIn(('FOO', 'bar'), headers_items)
- self.assertIn(('dead', 'beef'), headers_items)
+ assert ('FOO', 'bar') in headers_items
+ assert ('dead', 'beef') in headers_items
if __name__ == "__main__":
diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py
index 3e12dddf..c3654b6a 100644
--- a/tests/unit/test_client.py
+++ b/tests/unit/test_client.py
@@ -13,6 +13,7 @@
import botocore.config
from tests import unittest
from tests import mock
+import pytest
import botocore
from botocore import utils
@@ -178,7 +179,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(service_client.__class__.__name__, 'MyService')
+ assert service_client.__class__.__name__ == 'MyService'
def test_client_name_with_amazon(self):
self.service_description['metadata']['serviceFullName'] = (
@@ -186,7 +187,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(service_client.__class__.__name__, 'MyService')
+ assert service_client.__class__.__name__ == 'MyService'
def test_client_name_using_abreviation(self):
self.service_description['metadata']['serviceAbbreviation'] = (
@@ -194,7 +195,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(service_client.__class__.__name__, 'Abbreviation')
+ assert service_client.__class__.__name__ == 'Abbreviation'
def test_client_name_with_non_alphabet_characters(self):
self.service_description['metadata']['serviceFullName'] = (
@@ -202,20 +203,20 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(service_client.__class__.__name__, 'MyService')
+ assert service_client.__class__.__name__ == 'MyService'
def test_client_name_with_no_full_name_or_abbreviation(self):
del self.service_description['metadata']['serviceFullName']
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(service_client.__class__.__name__, 'myservice')
+ assert service_client.__class__.__name__ == 'myservice'
def test_client_generated_from_model(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(hasattr(service_client, 'test_operation'))
+ assert hasattr(service_client, 'test_operation')
def test_client_method_docstring(self):
creator = self.create_client_creator()
@@ -235,7 +236,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
':param Foo: **[REQUIRED]** Documents Foo'
]
for line in ref_docstring_lines:
- self.assertIn(line, method_docstring)
+ assert line in method_docstring
def test_client_method_help(self):
creator = self.create_client_creator()
@@ -257,13 +258,13 @@ class TestAutoGeneratedClient(unittest.TestCase):
':param Foo: **[REQUIRED]** Documents Foo'
]
for line in ref_docstring_lines:
- self.assertIn(line, method_docstring)
+ assert line in method_docstring
def test_client_create_unicode(self):
creator = self.create_client_creator()
service_client = creator.create_client(
u'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(hasattr(service_client, 'test_operation'))
+ assert hasattr(service_client, 'test_operation')
def test_client_has_region_name_on_meta(self):
creator = self.create_client_creator()
@@ -271,22 +272,20 @@ class TestAutoGeneratedClient(unittest.TestCase):
self.endpoint.region_name = region_name
service_client = creator.create_client(
'myservice', region_name, credentials=self.credentials)
- self.assertEqual(service_client.meta.region_name, region_name)
+ assert service_client.meta.region_name == region_name
def test_client_has_endpoint_url_on_meta(self):
creator = self.create_client_creator()
self.endpoint.host = 'https://foo.bar'
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertEqual(service_client.meta.endpoint_url,
- 'https://foo.bar')
+ assert service_client.meta.endpoint_url == 'https://foo.bar'
def test_client_has_standard_partition_on_meta(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertEqual(service_client.meta.partition,
- 'aws')
+ assert service_client.meta.partition == 'aws'
def test_client_has_non_standard_partition_on_meta(self):
creator = self.create_client_creator()
@@ -298,23 +297,20 @@ class TestAutoGeneratedClient(unittest.TestCase):
}
service_client = creator.create_client(
'myservice', 'cn-north-1', credentials=self.credentials)
- self.assertEqual(service_client.meta.partition,
- 'aws-cn')
+ assert service_client.meta.partition == 'aws-cn'
def test_client_has_exceptions_attribute(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(hasattr(service_client, 'exceptions'))
+ assert hasattr(service_client, 'exceptions')
def test_client_has_modeled_exceptions(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(
- issubclass(service_client.exceptions.TestOperationException,
+ assert issubclass(service_client.exceptions.TestOperationException,
client.ClientError)
- )
def test_api_version_is_passed_to_loader_if_provided(self):
creator = self.create_client_creator()
@@ -329,7 +325,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
def test_create_client_class_creates_class(self):
creator = self.create_client_creator()
client_class = creator.create_client_class('myservice')
- self.assertTrue(hasattr(client_class, 'test_operation'))
+ assert hasattr(client_class, 'test_operation')
def test_create_client_class_forwards_api_version(self):
creator = self.create_client_creator()
@@ -352,9 +348,9 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertEqual(service_client.meta.region_name, 'us-west-2')
+ assert service_client.meta.region_name == 'us-west-2'
call_args = mock_signer.call_args
- self.assertEqual(credential_scope_region, call_args[0][1])
+ assert call_args[0][1] == credential_scope_region
def test_client_uses_signing_region_from_credential_scope(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@@ -371,11 +367,11 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_name='myservice', region_name='us-west-2',
credentials=self.credentials)
# Use the resolved region as the region value.
- self.assertEqual(service_client.meta.region_name, 'us-west-2')
+ assert service_client.meta.region_name == 'us-west-2'
# Ensure that we use the credential scope region for signing,
# and not the resolved region name.
call_args = mock_signer.call_args
- self.assertEqual(credential_scope_region, call_args[0][1])
+ assert call_args[0][1] == credential_scope_region
def test_client_uses_signing_name_from_credential_scope(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@@ -391,8 +387,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_name='myservice', region_name='us-west-2',
credentials=self.credentials)
call_args = mock_signer.call_args
- self.assertEqual('MyService', call_args[0][0])
- self.assertEqual('override', call_args[0][2])
+ assert call_args[0][0] == 'MyService'
+ assert call_args[0][2] == 'override'
def test_client_uses_given_region_name_and_endpoint_url_when_present(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@@ -408,9 +404,9 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client = creator.create_client(
service_name='myservice', region_name='us-west-2',
credentials=self.credentials, endpoint_url='https://foo')
- self.assertEqual(service_client.meta.region_name, 'us-west-2')
+ assert service_client.meta.region_name == 'us-west-2'
call_args = mock_signer.call_args
- self.assertEqual('us-west-2', call_args[0][1])
+ assert call_args[0][1] == 'us-west-2'
def test_client_uses_signing_name_from_model_if_present_if_resolved(self):
self.service_description['metadata']['signingName'] = 'otherName'
@@ -425,9 +421,9 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client = creator.create_client(
service_name='myservice', region_name='us-west-2',
credentials=self.credentials, endpoint_url='https://foo')
- self.assertEqual(service_client.meta.region_name, 'us-west-2')
+ assert service_client.meta.region_name == 'us-west-2'
call_args = mock_signer.call_args[0]
- self.assertEqual('otherName', call_args[2])
+ assert call_args[2] == 'otherName'
def test_client_uses_signing_name_even_with_no_resolve(self):
self.service_description['metadata']['signingName'] = 'otherName'
@@ -437,9 +433,9 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client = creator.create_client(
service_name='myservice', region_name='us-west-2',
credentials=self.credentials, endpoint_url='https://foo')
- self.assertEqual(service_client.meta.region_name, 'us-west-2')
+ assert service_client.meta.region_name == 'us-west-2'
call_args = mock_signer.call_args[0]
- self.assertEqual('otherName', call_args[2])
+ assert call_args[2] == 'otherName'
@mock.patch('botocore.args.RequestSigner')
def test_client_signature_no_override(self, request_signer):
@@ -475,8 +471,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
def test_client_method_to_api_mapping(self):
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(service_client.meta.method_to_api_mapping,
- {'test_operation': 'TestOperation'})
+ assert service_client.meta.method_to_api_mapping == {
+ 'test_operation': 'TestOperation'}
def test_anonymous_client_request(self):
creator = self.create_client_creator()
@@ -487,15 +483,15 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client.test_operation(Foo='one')
# Make sure a request has been attempted
- self.assertTrue(self.endpoint.make_request.called)
+ assert self.endpoint.make_request.called
# Make sure the request parameters do NOT include auth
# information. The service defined above for these tests
# uses sigv4 by default (which we disable).
params = dict((k.lower(), v) for k, v in
self.endpoint.make_request.call_args[0][1].items())
- self.assertNotIn('authorization', params)
- self.assertNotIn('x-amz-signature', params)
+ assert 'authorization' not in params
+ assert 'x-amz-signature' not in params
def test_client_user_agent_in_request(self):
creator = self.create_client_creator()
@@ -504,10 +500,10 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client.test_operation(Foo='one')
- self.assertTrue(self.endpoint.make_request.called)
+ assert self.endpoint.make_request.called
params = dict((k.lower(), v) for k, v in
self.endpoint.make_request.call_args[0][1].items())
- self.assertEqual(params['headers']['User-Agent'], 'user-agent')
+ assert params['headers']['User-Agent'] == 'user-agent'
def test_client_custom_user_agent_in_request(self):
creator = self.create_client_creator()
@@ -517,10 +513,10 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client.test_operation(Foo='one')
- self.assertTrue(self.endpoint.make_request.called)
+ assert self.endpoint.make_request.called
params = dict((k.lower(), v) for k, v in
self.endpoint.make_request.call_args[0][1].items())
- self.assertEqual(params['headers']['User-Agent'], 'baz')
+ assert params['headers']['User-Agent'] == 'baz'
def test_client_custom_user_agent_extra_in_request(self):
creator = self.create_client_creator()
@@ -529,33 +525,31 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2', client_config=config)
service_client.test_operation(Foo='one')
headers = self.endpoint.make_request.call_args[0][1]['headers']
- self.assertEqual(headers['User-Agent'], 'user-agent extrastuff')
+ assert headers['User-Agent'] == 'user-agent extrastuff'
def test_client_registers_request_created_handler(self):
event_emitter = self.create_mock_emitter()
creator = self.create_client_creator(event_emitter=event_emitter)
creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertIn(
- mock.call('request-created.myservice', mock.ANY),
- event_emitter.register.call_args_list)
+ assert mock.call('request-created.myservice',
+ mock.ANY) in event_emitter.register.call_args_list
def test_client_makes_call(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertTrue(self.endpoint_creator.create_endpoint.called)
+ assert self.endpoint_creator.create_endpoint.called
response = service_client.test_operation(Foo='one', Bar='two')
- self.assertEqual(response, {})
+ assert response == {}
def test_client_error_message_for_positional_args(self):
creator = self.create_client_creator()
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- with six.assertRaisesRegex(self, TypeError,
- 'only accepts keyword arguments'):
+ with pytest.raises(TypeError, match='only accepts keyword arguments'):
service_client.test_operation('foo')
@mock.patch('botocore.args.RequestSigner.sign')
@@ -581,7 +575,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.make_api_call_with_missing_param(service_client)
def test_client_doesnt_validate_params_when_validation_disabled(self):
@@ -617,56 +611,55 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2',
client_config=botocore.config.Config(connect_timeout=123, read_timeout=234))
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['timeout'], (123, 234))
+ assert call_kwargs['timeout'] == (123, 234)
def test_client_with_custom_connect_timeout(self):
self.create_client_creator().create_client(
'myservice', 'us-west-2',
client_config=botocore.config.Config(connect_timeout=123))
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['timeout'], (123, DEFAULT_TIMEOUT))
+ assert call_kwargs['timeout'] == (123, DEFAULT_TIMEOUT)
def test_client_with_custom_read_timeout(self):
self.create_client_creator().create_client(
'myservice', 'us-west-2',
client_config=botocore.config.Config(read_timeout=234))
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['timeout'], (DEFAULT_TIMEOUT, 234))
+ assert call_kwargs['timeout'] == (DEFAULT_TIMEOUT, 234)
def test_client_with_custom_neither_timeout(self):
self.create_client_creator().create_client('myservice', 'us-west-2')
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['timeout'],
- (DEFAULT_TIMEOUT, DEFAULT_TIMEOUT))
+ assert call_kwargs['timeout'] == (DEFAULT_TIMEOUT, DEFAULT_TIMEOUT)
def test_client_with_custom_params(self):
creator = self.create_client_creator()
creator.create_client('myservice', 'us-west-2',
is_secure=False, verify=False)
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertFalse(call_kwargs['verify'])
- self.assertNotIn('is_secure', call_kwargs)
+ assert not call_kwargs['verify']
+ assert 'is_secure' not in call_kwargs
def test_client_with_endpoint_url(self):
creator = self.create_client_creator()
creator.create_client('myservice', 'us-west-2',
endpoint_url='http://custom.foo')
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['endpoint_url'], 'http://custom.foo')
+ assert call_kwargs['endpoint_url'] == 'http://custom.foo'
def test_client_can_use_guessed_endpoints(self):
# Ensure the resolver returns None (meaning a guess is made)
self.resolver.construct_endpoint.return_value = None
creator = self.create_client_creator()
client = creator.create_client('myservice', region_name='invalid')
- self.assertEqual('invalid', client.meta.region_name)
+ assert client.meta.region_name == 'invalid'
def test_client_with_response_parser_factory(self):
factory = mock.Mock()
creator = self.create_client_creator(response_parser_factory=factory)
creator.create_client('myservice', 'us-west-2')
call_kwargs = self.endpoint_creator.create_endpoint.call_args[1]
- self.assertEqual(call_kwargs['response_parser_factory'], factory)
+ assert call_kwargs['response_parser_factory'] == factory
def test_operation_cannot_paginate(self):
pagination_config = {
@@ -687,7 +680,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
self.service_description, pagination_config]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertFalse(service_client.can_paginate('test_operation'))
+ assert not service_client.can_paginate('test_operation')
def test_operation_can_paginate(self):
pagination_config = {
@@ -705,10 +698,10 @@ class TestAutoGeneratedClient(unittest.TestCase):
self.service_description, pagination_config]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertTrue(service_client.can_paginate('test_operation'))
+ assert service_client.can_paginate('test_operation')
# Also, the config is cached, but we want to make sure we get
# the same answer when we ask again.
- self.assertTrue(service_client.can_paginate('test_operation'))
+ assert service_client.can_paginate('test_operation')
def test_service_has_no_pagination_configs(self):
# This is the case where there is an actual *.paginator.json, file,
@@ -720,7 +713,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
exceptions.DataNotFoundError(data_path='/foo')]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertFalse(service_client.can_paginate('test_operation'))
+ assert not service_client.can_paginate('test_operation')
def test_waiter_config_uses_service_name_not_endpoint_prefix(self):
waiter_config = {
@@ -737,7 +730,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
# 'myservice', we use 'other-service-name' for waiters/paginators, etc.
service_client = creator.create_client('other-service-name',
'us-west-2')
- self.assertEqual(service_client.waiter_names, [])
+ assert service_client.waiter_names == []
# Note we're using other-service-name, not
# 'myservice', which is the endpointPrefix.
self.loader.load_service_model.assert_called_with(
@@ -767,9 +760,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(sorted(service_client.waiter_names),
- sorted(['waiter1', 'waiter2']))
- self.assertTrue(hasattr(service_client.get_waiter('waiter1'), 'wait'))
+ assert sorted(service_client.waiter_names) == ['waiter1', 'waiter2']
+ assert hasattr(service_client.get_waiter('waiter1'), 'wait')
def test_service_has_no_waiter_configs(self):
self.loader.load_service_model.side_effect = [
@@ -777,8 +769,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
exceptions.DataNotFoundError(data_path='/foo')]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(service_client.waiter_names, [])
- with self.assertRaises(ValueError):
+ assert service_client.waiter_names == []
+ with pytest.raises(ValueError):
service_client.get_waiter("unknown_waiter")
def test_service_has_retry_event(self):
@@ -837,7 +829,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator.create_client('myservice', 'us-west-2')
for call in event_emitter.register.call_args_list:
- self.assertNotIn('needs-retry', call[0][0])
+ assert 'needs-retry' not in call[0][0]
def test_emits_after_call_error(self):
event_emitter = hooks.HierarchicalEmitter()
@@ -854,11 +846,10 @@ class TestAutoGeneratedClient(unittest.TestCase):
self.endpoint.make_request.side_effect = raised_error
creator = self.create_client_creator(event_emitter=event_emitter)
client = creator.create_client('myservice', 'us-west-2')
- with self.assertRaises(RuntimeError):
+ with pytest.raises(RuntimeError):
client.test_operation(Foo='one', Bar='two')
- self.assertEqual(
- recorded_kwargs,
- [{'exception': raised_error, 'context': mock.ANY}])
+ assert recorded_kwargs == [
+ {'exception': raised_error, 'context': mock.ANY}]
def test_can_override_max_attempts(self):
retry_handler_factory = mock.Mock(botocore.retryhandler)
@@ -888,7 +879,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2',
client_config=botocore.config.Config(
retries={'mode': 'standard'}))
- self.assertTrue(standard.register_retry_handler.called)
+ assert standard.register_retry_handler.called
def test_can_register_standard_retry_mode_from_config_store(self):
fake_env = {'AWS_RETRY_MODE': 'standard'}
@@ -902,7 +893,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator(config_store=config_store)
with mock.patch('botocore.client.standard') as standard:
creator.create_client( 'myservice', 'us-west-2')
- self.assertTrue(standard.register_retry_handler.called)
+ assert standard.register_retry_handler.called
def test_try_to_paginate_non_paginated(self):
self.loader.load_service_model.side_effect = [
@@ -911,7 +902,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
]
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- with self.assertRaises(exceptions.OperationNotPageableError):
+ with pytest.raises(exceptions.OperationNotPageableError):
service_client.get_paginator('test_operation')
def test_successful_pagination_object_created(self):
@@ -935,7 +926,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
paginator = service_client.get_paginator('test_operation')
# The pagination logic itself is tested elsewhere (test_paginate.py),
# but we can at least make sure it looks like a paginator.
- self.assertTrue(hasattr(paginator, 'paginate'))
+ assert hasattr(paginator, 'paginate')
def test_paginator_class_name_from_client(self):
pagination_config = {
@@ -956,9 +947,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
paginator = service_client.get_paginator('test_operation')
- self.assertEqual(
- paginator.__class__.__name__,
- 'MyService.Paginator.TestOperation')
+ assert paginator.__class__.__name__ == 'MyService.Paginator.TestOperation'
def test_paginator_help_from_client(self):
pagination_config = {
@@ -1017,7 +1006,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
' :returns: None',
]
for line in lines:
- self.assertIn(line, contents)
+ assert line in contents
def test_can_set_credentials_in_client_init(self):
creator = self.create_client_creator()
@@ -1029,7 +1018,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
# Verify that we create an endpoint with a credentials object
# matching our creds arguments.
- self.assertEqual(client._request_signer._credentials, credentials)
+ assert client._request_signer._credentials == credentials
def test_event_emitted_when_invoked(self):
event_emitter = hooks.HierarchicalEmitter()
@@ -1042,7 +1031,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
service_client.test_operation(Foo='one', Bar='two')
- self.assertEqual(len(calls), 1)
+ assert len(calls) == 1
def test_events_are_per_client(self):
event_emitter = hooks.HierarchicalEmitter()
@@ -1067,16 +1056,16 @@ class TestAutoGeneratedClient(unittest.TestCase):
# So if we invoke the first client.
first_client.test_operation(Foo='one', Bar='two')
# Only first_calls is populated, not second_calls.
- self.assertEqual(len(first_calls), 1)
- self.assertEqual(len(second_calls), 0)
+ assert len(first_calls) == 1
+ assert len(second_calls) == 0
# If we invoke an operation from the second client,
# only second_calls will be populated, not first_calls.
second_client.test_operation(Foo='one', Bar='two')
# first_calls == 1 from the previous first_client.test_operation()
# call.
- self.assertEqual(len(first_calls), 1)
- self.assertEqual(len(second_calls), 1)
+ assert len(first_calls) == 1
+ assert len(second_calls) == 1
def test_clients_inherit_handlers_from_session(self):
# Even though clients get their own event emitters, they still
@@ -1097,13 +1086,13 @@ class TestAutoGeneratedClient(unittest.TestCase):
first_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
first_client.test_operation(Foo='one', Bar='two')
- self.assertEqual(len(base_calls), 1)
+ assert len(base_calls) == 1
# Same thing if we create another client.
second_client = creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
second_client.test_operation(Foo='one', Bar='two')
- self.assertEqual(len(base_calls), 2)
+ assert len(base_calls) == 2
def test_clients_inherit_only_at_create_time(self):
# If event handlers are added to the copied event emitter
@@ -1123,15 +1112,15 @@ class TestAutoGeneratedClient(unittest.TestCase):
# 3. The client will _not_ see this because it already has its
# own copy of the event handlers.
first_client.test_operation(Foo='one', Bar='two')
- self.assertEqual(len(base_calls), 0)
+ assert len(base_calls) == 0
def test_clients_have_meta_object(self):
creator = self.create_client_creator()
service_client = creator.create_client('myservice', 'us-west-2')
- self.assertTrue(hasattr(service_client, 'meta'))
- self.assertTrue(hasattr(service_client.meta, 'events'))
+ assert hasattr(service_client, 'meta')
+ assert hasattr(service_client.meta, 'events')
# Sanity check the event emitter has an .emit() method.
- self.assertTrue(hasattr(service_client.meta.events, 'emit'))
+ assert hasattr(service_client.meta.events, 'emit')
def test_client_register_seperate_unique_id_event(self):
event_emitter = hooks.HierarchicalEmitter()
@@ -1148,27 +1137,27 @@ class TestAutoGeneratedClient(unittest.TestCase):
# Ensure both clients can register a function with an unique id
client1_responses = client1.meta.events.emit('some-event')
- self.assertEqual(len(client1_responses), 1)
- self.assertEqual(client1_responses[0][1], 'foo')
+ assert len(client1_responses) == 1
+ assert client1_responses[0][1] == 'foo'
client2_responses = client2.meta.events.emit('some-event')
- self.assertEqual(len(client2_responses), 1)
- self.assertEqual(client2_responses[0][1], 'foo')
+ assert len(client2_responses) == 1
+ assert client2_responses[0][1] == 'foo'
# Ensure when a client is unregistered the other client has
# the unique-id event still registered.
client1.meta.events.unregister('some-event', ping, 'my-unique-id')
client1_responses = client1.meta.events.emit('some-event')
- self.assertEqual(len(client1_responses), 0)
+ assert len(client1_responses) == 0
client2_responses = client2.meta.events.emit('some-event')
- self.assertEqual(len(client2_responses), 1)
- self.assertEqual(client2_responses[0][1], 'foo')
+ assert len(client2_responses) == 1
+ assert client2_responses[0][1] == 'foo'
# Ensure that the other client can unregister the event
client2.meta.events.unregister('some-event', ping, 'my-unique-id')
client2_responses = client2.meta.events.emit('some-event')
- self.assertEqual(len(client2_responses), 0)
+ assert len(client2_responses) == 0
def test_client_created_emits_events(self):
called = []
@@ -1183,8 +1172,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
creator.create_client(
'myservice', 'us-west-2', credentials=self.credentials)
- self.assertEqual(len(called), 1)
- self.assertIn('test_operation', called[0])
+ assert len(called) == 1
+ assert 'test_operation' in called[0]
def test_client_method_called_event(self):
event_emitter = hooks.HierarchicalEmitter()
@@ -1206,16 +1195,16 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_client.test_operation(**params)
# Ensure that the initial params were not modified in the handler
- self.assertEqual(params, {'Foo': 'one', 'Bar': 'two'})
+ assert params == {'Foo': 'one', 'Bar': 'two'}
# Ensure the handler passed on the correct param values.
body = self.endpoint.make_request.call_args[0][1]['body']
- self.assertEqual(body['Foo'], 'zero')
+ assert body['Foo'] == 'zero'
def test_client_default_for_s3_addressing_style(self):
creator = self.create_client_creator()
client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(client.meta.config.s3, None)
+ assert client.meta.config.s3 is None
def test_client_s3_addressing_style_with_config(self):
creator = self.create_client_creator()
@@ -1223,8 +1212,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2',
client_config=botocore.config.Config(s3={'addressing_style': 'auto'})
)
- self.assertEqual(
- my_client.meta.config.s3['addressing_style'], 'auto')
+ assert my_client.meta.config.s3['addressing_style'] == 'auto'
def test_client_s3_addressing_style_with_bad_value(self):
creator = self.create_client_creator()
@@ -1232,7 +1220,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2',
scoped_config={'s3': ''},
)
- self.assertIsNone(client.meta.config.s3)
+ assert client.meta.config.s3 is None
def test_client_s3_addressing_style_with_config_store(self):
self.config_store.set_config_variable(
@@ -1240,11 +1228,10 @@ class TestAutoGeneratedClient(unittest.TestCase):
)
creator = self.create_client_creator()
client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(
- client.meta.config.s3['addressing_style'], 'virtual')
+ assert client.meta.config.s3['addressing_style'] == 'virtual'
def test_client_s3_addressing_style_with_incorrect_style(self):
- with self.assertRaises(InvalidS3AddressingStyleError):
+ with pytest.raises(InvalidS3AddressingStyleError):
botocore.config.Config(s3={'addressing_style': 'foo'})
def test_client_s3_addressing_style_config_overrides_config_store(self):
@@ -1256,8 +1243,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
client_config=botocore.config.Config(
s3={'addressing_style': 'auto'})
)
- self.assertEqual(
- my_client.meta.config.s3['addressing_style'], 'auto')
+ assert my_client.meta.config.s3['addressing_style'] == 'auto'
def test_client_payload_signing_from_config_store(self):
self.config_store.set_config_variable(
@@ -1265,8 +1251,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
)
creator = self.create_client_creator()
my_client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(
- my_client.meta.config.s3['payload_signing_enabled'], True)
+ assert my_client.meta.config.s3['payload_signing_enabled'] is True
def test_client_payload_signing_from_client_config(self):
creator = self.create_client_creator()
@@ -1275,8 +1260,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
client_config=client.Config(s3={'payload_signing_enabled': True})
)
- self.assertEqual(
- my_client.meta.config.s3['payload_signing_enabled'], True)
+ assert my_client.meta.config.s3['payload_signing_enabled'] is True
def test_client_payload_signing_client_config_overrides_scoped(self):
creator = self.create_client_creator()
@@ -1286,8 +1270,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
client_config=client.Config(s3={'payload_signing_enabled': True})
)
- self.assertEqual(
- my_client.meta.config.s3['payload_signing_enabled'], True)
+ assert my_client.meta.config.s3['payload_signing_enabled'] is True
def test_client_s3_accelerate_from_config_store(self):
self.config_store.set_config_variable(
@@ -1295,8 +1278,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
)
creator = self.create_client_creator()
my_client = creator.create_client('myservice', 'us-west-2')
- self.assertEqual(
- my_client.meta.config.s3['use_accelerate_endpoint'], True)
+ assert my_client.meta.config.s3['use_accelerate_endpoint'] is True
def test_client_s3_accelerate_from_client_config(self):
creator = self.create_client_creator()
@@ -1305,8 +1287,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
client_config=client.Config(s3={'use_accelerate_endpoint': True})
)
- self.assertEqual(
- my_client.meta.config.s3['use_accelerate_endpoint'], True)
+ assert my_client.meta.config.s3['use_accelerate_endpoint'] is True
def test_client_s3_accelerate_client_config_overrides_config_store(self):
self.config_store.set_config_variable(
@@ -1318,8 +1299,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
client_config=client.Config(s3={'use_accelerate_endpoint': True})
)
- self.assertEqual(
- my_client.meta.config.s3['use_accelerate_endpoint'], True)
+ assert my_client.meta.config.s3['use_accelerate_endpoint'] is True
def test_before_call_short_circuits_request(self):
def return_mock_tuple(**kwargs):
@@ -1333,7 +1313,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2')
service_client.test_operation(Foo='one')
- self.assertFalse(self.endpoint.make_request.called)
+ assert not self.endpoint.make_request.called
def test_getattr_emits_event(self):
emitter = self.create_mock_emitter()
@@ -1345,7 +1325,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
# Assert that the event hasn't fired yet
emitter.emit_until_response.assert_not_called()
- with self.assertRaises(AttributeError):
+ with pytest.raises(AttributeError):
service_client.attribute_that_does_not_exist
emitter.emit_until_response.assert_called_once_with(
@@ -1362,7 +1342,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
value = service_client.attribute_that_does_not_exist
- self.assertEqual(value, 'success')
+ assert value == 'success'
def _create_hostname_binding_client(self, *args, **kwargs):
test_operation = self.service_description['operations']['TestOperation']
@@ -1378,13 +1358,13 @@ class TestAutoGeneratedClient(unittest.TestCase):
client.test_operation(Foo='bound')
expected_url = 'https://bound.myservice.amazonaws.com/'
- self.assertTrue(self.endpoint.make_request.called)
+ assert self.endpoint.make_request.called
request_dict = self.endpoint.make_request.call_args[0][1]
- self.assertEqual(request_dict['url'], expected_url)
+ assert request_dict['url'] == expected_url
def test_client_operation_hostname_binding_validation(self):
client = self._create_hostname_binding_client('us-west-2')
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
client.test_operation(Foo='')
def test_client_operation_hostname_binding_configuration(self):
@@ -1395,9 +1375,9 @@ class TestAutoGeneratedClient(unittest.TestCase):
client.test_operation(Foo='baz')
expected_url = 'https://myservice.amazonaws.com/'
- self.assertTrue(self.endpoint.make_request.called)
+ assert self.endpoint.make_request.called
request_dict = self.endpoint.make_request.call_args[0][1]
- self.assertEqual(request_dict['url'], expected_url)
+ assert request_dict['url'] == expected_url
class TestClientErrors(TestAutoGeneratedClient):
@@ -1413,7 +1393,7 @@ class TestClientErrors(TestAutoGeneratedClient):
with Stubber(client) as stub:
stub.add_client_error(
'test_operation', 'TestOperationErrorCode', 'error occurred')
- with self.assertRaises(client.exceptions.TestOperationException):
+ with pytest.raises(client.exceptions.TestOperationException):
client.test_operation(Foo='one', Bar='two')
def test_error_with_no_wire_code(self):
@@ -1429,7 +1409,7 @@ class TestClientErrors(TestAutoGeneratedClient):
# This is needed becasue the error could be a subclass of
# ClientError.
# We explicitly want it to be a generic ClientError though
- self.assertEqual(e.__class__, exceptions.ClientError)
+ assert e.__class__ == exceptions.ClientError
def test_error_with_dot_separated_code(self):
creator = self.create_client_creator()
@@ -1445,7 +1425,7 @@ class TestClientErrors(TestAutoGeneratedClient):
# This is needed becasue the error could be a subclass of
# ClientError.
# We explicitly want it to be a generic ClientError though
- self.assertEqual(e.__class__, exceptions.ClientError)
+ assert e.__class__ == exceptions.ClientError
def test_error_with_empty_message(self):
creator = self.create_client_creator()
@@ -1455,7 +1435,7 @@ class TestClientErrors(TestAutoGeneratedClient):
with Stubber(client) as stub:
stub.add_client_error(
'test_operation', 'TestOperationErrorCode')
- with self.assertRaises(client.exceptions.TestOperationException):
+ with pytest.raises(client.exceptions.TestOperationException):
client.test_operation(Foo='one', Bar='two')
def test_error_with_empty_code(self):
@@ -1471,7 +1451,7 @@ class TestClientErrors(TestAutoGeneratedClient):
# This is needed becasue the error could be a subclass of
# ClientError.
# We explicitly want it to be a generic ClientError though
- self.assertEqual(e.__class__, exceptions.ClientError)
+ assert e.__class__ == exceptions.ClientError
def test_error_with_missing_code(self):
error_response = {'Error': {'Message': 'error occurred'}}
@@ -1490,7 +1470,7 @@ class TestClientErrors(TestAutoGeneratedClient):
# This is needed becasue the error could be a subclass of
# ClientError.
# We explicitly want it to be a generic ClientError though
- self.assertEqual(e.__class__, exceptions.ClientError)
+ assert e.__class__ == exceptions.ClientError
def test_error_with_empty_contents(self):
error_response = {'Error': {}}
@@ -1509,7 +1489,7 @@ class TestClientErrors(TestAutoGeneratedClient):
# This is needed becasue the error could be a subclass of
# ClientError.
# We explicitly want it to be a generic ClientError though
- self.assertEqual(e.__class__, exceptions.ClientError)
+ assert e.__class__ == exceptions.ClientError
def test_exception_classes_across_clients_are_the_same(self):
creator = self.create_client_creator(
@@ -1527,38 +1507,37 @@ class TestClientErrors(TestAutoGeneratedClient):
except client2.exceptions.TestOperationException as e:
# Caught exception should as well be an instance of the
# other client's TestOperationException
- self.assertIsInstance(
- e, client.exceptions.TestOperationException)
+ assert isinstance(e, client.exceptions.TestOperationException)
class TestConfig(unittest.TestCase):
def test_can_use_args_to_construct(self):
config = botocore.config.Config(*botocore.config.Config.OPTION_DEFAULTS.values())
for option, default_value in botocore.config.Config.OPTION_DEFAULTS.items():
- self.assertTrue(hasattr(config, option))
- self.assertEqual(getattr(config, option), default_value)
+ assert hasattr(config, option)
+ assert getattr(config, option) == default_value
def test_can_use_kwargs_to_construct(self):
config = botocore.config.Config(**botocore.config.Config.OPTION_DEFAULTS)
for option, default_value in botocore.config.Config.OPTION_DEFAULTS.items():
- self.assertTrue(hasattr(config, option))
- self.assertEqual(getattr(config, option), default_value)
+ assert hasattr(config, option)
+ assert getattr(config, option) == default_value
def test_can_use_mix_of_args_and_kwargs(self):
config = botocore.config.Config('us-east-1', read_timeout=50)
- self.assertEqual(config.region_name, 'us-east-1')
- self.assertEqual(config.read_timeout, 50)
+ assert config.region_name == 'us-east-1'
+ assert config.read_timeout == 50
def test_invalid_kwargs(self):
- with six.assertRaisesRegex(self, TypeError, 'Got unexpected keyword'):
+ with pytest.raises(TypeError, match='Got unexpected keyword'):
botocore.config.Config(foo='foo')
def test_pass_invalid_length_of_args(self):
- with six.assertRaisesRegex(self, TypeError, 'Takes at most'):
+ with pytest.raises(TypeError, match=r'Takes at most'):
botocore.config.Config('foo', *botocore.config.Config.OPTION_DEFAULTS.values())
def test_create_with_multiple_kwargs(self):
- with six.assertRaisesRegex(self, TypeError, 'Got multiple values'):
+ with pytest.raises(TypeError, match='Got multiple values'):
botocore.config.Config('us-east-1', region_name='us-east-1')
def test_merge_returns_new_config_object(self):
@@ -1566,10 +1545,10 @@ class TestConfig(unittest.TestCase):
other_config = botocore.config.Config()
new_config = config.merge(other_config)
# Check the type is correct
- self.assertIsInstance(new_config, botocore.config.Config)
+ assert isinstance(new_config, botocore.config.Config)
# Make sure the new config is a brand new config object
- self.assertIsNot(new_config, config)
- self.assertIsNot(new_config, other_config)
+ assert new_config is not config
+ assert new_config is not other_config
def test_general_merge_keeps_default_values(self):
config = botocore.config.Config()
@@ -1577,54 +1556,54 @@ class TestConfig(unittest.TestCase):
config_properties = vars(config)
new_config = config.merge(other_config)
# Ensure that the values all stayed the same in the new config
- self.assertEqual(config_properties, vars(new_config))
+ assert config_properties == vars(new_config)
def test_merge_overrides_values(self):
config = botocore.config.Config(region_name='us-east-1')
other_config = botocore.config.Config(region_name='us-west-2')
new_config = config.merge(other_config)
- self.assertEqual(new_config.region_name, 'us-west-2')
+ assert new_config.region_name == 'us-west-2'
def test_merge_overrides_values_even_when_using_default(self):
config = botocore.config.Config(region_name='us-east-1')
other_config = botocore.config.Config(region_name=None)
new_config = config.merge(other_config)
- self.assertEqual(new_config.region_name, None)
+ assert new_config.region_name is None
def test_merge_overrides_values_even_when_using_default_timeout(self):
config = botocore.config.Config(read_timeout=30)
other_config = botocore.config.Config(read_timeout=DEFAULT_TIMEOUT)
new_config = config.merge(other_config)
- self.assertEqual(new_config.read_timeout, DEFAULT_TIMEOUT)
+ assert new_config.read_timeout == DEFAULT_TIMEOUT
def test_merge_overrides_only_when_user_provided_values(self):
config = botocore.config.Config(
region_name='us-east-1', signature_version='s3v4')
other_config = botocore.config.Config(region_name='us-west-2')
new_config = config.merge(other_config)
- self.assertEqual(new_config.region_name, 'us-west-2')
- self.assertEqual(new_config.signature_version, 's3v4')
+ assert new_config.region_name == 'us-west-2'
+ assert new_config.signature_version == 's3v4'
def test_can_set_retry_max_attempts(self):
config = botocore.config.Config(retries={'max_attempts': 15})
- self.assertEqual(config.retries['max_attempts'], 15)
+ assert config.retries['max_attempts'] == 15
def test_validates_retry_config(self):
- with six.assertRaisesRegex(
- self, InvalidRetryConfigurationError,
- 'Cannot provide retry configuration for "not-allowed"'):
+ with pytest.raises(
+ InvalidRetryConfigurationError,
+ match=r'Cannot provide retry configuration for "not-allowed"'):
botocore.config.Config(retries={'not-allowed': True})
def test_validates_max_retry_attempts(self):
- with self.assertRaises(InvalidMaxRetryAttemptsError):
+ with pytest.raises(InvalidMaxRetryAttemptsError):
botocore.config.Config(retries={'max_attempts': -1})
def test_validates_total_max_attempts(self):
- with self.assertRaises(InvalidMaxRetryAttemptsError):
+ with pytest.raises(InvalidMaxRetryAttemptsError):
botocore.config.Config(retries={'total_max_attempts': 0})
def test_validaties_retry_mode(self):
- with self.assertRaises(InvalidRetryModeError):
+ with pytest.raises(InvalidRetryModeError):
botocore.config.Config(retries={'mode': 'turbo-mode'})
@@ -1647,13 +1626,12 @@ class TestClientEndpointBridge(unittest.TestCase):
resolver.construct_endpoint.return_value = None
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', region_name='guess')
- self.assertEqual('guess', resolved['region_name'])
- self.assertEqual('guess', resolved['signing_region'])
- self.assertEqual('myservice', resolved['signing_name'])
- self.assertEqual('myservice', resolved['service_name'])
- self.assertEqual('v4', resolved['signature_version'])
- self.assertEqual('https://myservice.guess.amazonaws.com',
- resolved['endpoint_url'])
+ assert resolved['region_name'] == 'guess'
+ assert resolved['signing_region'] == 'guess'
+ assert resolved['signing_name'] == 'myservice'
+ assert resolved['service_name'] == 'myservice'
+ assert resolved['signature_version'] == 'v4'
+ assert resolved['endpoint_url'] == 'https://myservice.guess.amazonaws.com'
def test_uses_us_east_1_by_default_for_s3(self):
resolver = mock.Mock()
@@ -1663,11 +1641,9 @@ class TestClientEndpointBridge(unittest.TestCase):
'protocols': ['https']}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('s3')
- self.assertEqual('us-east-1', resolved['region_name'])
- self.assertEqual('us-east-1', resolved['signing_region'])
- self.assertEqual('https://s3.amazonaws.com',
- resolved['endpoint_url'])
-
+ assert resolved['region_name'] == 'us-east-1'
+ assert resolved['signing_region'] == 'us-east-1'
+ assert resolved['endpoint_url'] == 'https://s3.amazonaws.com'
def test_uses_region_from_client_config_if_available(self):
resolver = mock.Mock()
resolver.construct_endpoint.return_value = None
@@ -1675,10 +1651,9 @@ class TestClientEndpointBridge(unittest.TestCase):
client_config.region_name = 'us-foo-bar'
bridge = ClientEndpointBridge(resolver, client_config=client_config)
resolved = bridge.resolve('test')
- self.assertEqual('us-foo-bar', resolved['region_name'])
- self.assertEqual('us-foo-bar', resolved['signing_region'])
- self.assertEqual('https://test.us-foo-bar.amazonaws.com',
- resolved['endpoint_url'])
+ assert resolved['region_name'] == 'us-foo-bar'
+ assert resolved['signing_region'] == 'us-foo-bar'
+ assert resolved['endpoint_url'] == 'https://test.us-foo-bar.amazonaws.com'
def test_can_guess_endpoint_and_use_given_endpoint_url(self):
resolver = mock.Mock()
@@ -1686,9 +1661,9 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve(
'test', 'guess', endpoint_url='http://test.com')
- self.assertEqual('guess', resolved['region_name'])
- self.assertEqual('guess', resolved['signing_region'])
- self.assertEqual('http://test.com', resolved['endpoint_url'])
+ assert resolved['region_name'] == 'guess'
+ assert resolved['signing_region'] == 'guess'
+ assert resolved['endpoint_url'] == 'http://test.com'
def test_can_use_endpoint_url_with_resolved_endpoint(self):
resolver = mock.Mock()
@@ -1698,10 +1673,10 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve(
'ec2', 'us-west-2', endpoint_url='https://foo')
- self.assertEqual('us-west-2', resolved['region_name'])
- self.assertEqual('us-west-2', resolved['signing_region'])
- self.assertEqual('https://foo', resolved['endpoint_url'])
- self.assertEqual('v2', resolved['signature_version'])
+ assert resolved['region_name'] == 'us-west-2'
+ assert resolved['signing_region'] == 'us-west-2'
+ assert resolved['endpoint_url'] == 'https://foo'
+ assert resolved['signature_version'] == 'v2'
def test_uses_ssl_common_name_over_hostname_if_present(self):
resolver = mock.Mock()
@@ -1711,9 +1686,9 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2', 'protocols': ['https']}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'us-west-2')
- self.assertEqual('us-west-2', resolved['region_name'])
- self.assertEqual('us-west-2', resolved['signing_region'])
- self.assertEqual('https://common-name.com', resolved['endpoint_url'])
+ assert resolved['region_name'] == 'us-west-2'
+ assert resolved['signing_region'] == 'us-west-2'
+ assert resolved['endpoint_url'] == 'https://common-name.com'
def test_can_create_http_urls(self):
resolver = mock.Mock()
@@ -1723,7 +1698,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-foo-baz'}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'us-foo-baz', is_secure=False)
- self.assertEqual('http://host.com', resolved['endpoint_url'])
+ assert resolved['endpoint_url'] == 'http://host.com'
def test_can_create_http_urls(self):
resolver = mock.Mock()
@@ -1733,7 +1708,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-foo-baz'}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'us-foo-baz', is_secure=False)
- self.assertEqual('http://host.com', resolved['endpoint_url'])
+ assert resolved['endpoint_url'] == 'http://host.com'
def test_credential_scope_overrides_signing_region(self):
resolver = mock.Mock()
@@ -1746,8 +1721,8 @@ class TestClientEndpointBridge(unittest.TestCase):
}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'us-foo-baz')
- self.assertEqual('us-foo-baz', resolved['region_name'])
- self.assertEqual('override', resolved['signing_region'])
+ assert resolved['region_name'] == 'us-foo-baz'
+ assert resolved['signing_region'] == 'override'
def test_cred_scope_does_not_override_signing_region_if_endpoint_url(self):
resolver = mock.Mock()
@@ -1761,9 +1736,9 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'us-foo-baz',
endpoint_url='https://override.com')
- self.assertEqual('us-foo-baz', resolved['region_name'])
- self.assertEqual('us-foo-baz', resolved['signing_region'])
- self.assertEqual('https://override.com', resolved['endpoint_url'])
+ assert resolved['region_name'] == 'us-foo-baz'
+ assert resolved['signing_region'] == 'us-foo-baz'
+ assert resolved['endpoint_url'] == 'https://override.com'
def test_resolved_region_overrides_region_when_no_endpoint_url(self):
resolver = mock.Mock()
@@ -1776,9 +1751,9 @@ class TestClientEndpointBridge(unittest.TestCase):
}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice', 'will-not-be-there')
- self.assertEqual('override', resolved['region_name'])
- self.assertEqual('override', resolved['signing_region'])
- self.assertEqual('https://host.com', resolved['endpoint_url'])
+ assert resolved['region_name'] == 'override'
+ assert resolved['signing_region'] == 'override'
+ assert resolved['endpoint_url'] == 'https://host.com'
def test_does_not_use_https_if_not_available(self):
resolver = mock.Mock()
@@ -1793,7 +1768,7 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('myservice')
# We should resolve to http://, not https://
- self.assertEqual('http://host.com', resolved['endpoint_url'])
+ assert resolved['endpoint_url'] == 'http://host.com'
def test_uses_signature_version_from_client_config(self):
resolver = mock.Mock()
@@ -1804,7 +1779,7 @@ class TestClientEndpointBridge(unittest.TestCase):
client_config.signature_version = 's3'
bridge = ClientEndpointBridge(resolver, client_config=client_config)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('s3', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3'
def test_uses_signature_version_from_client_config_when_guessing(self):
resolver = mock.Mock()
@@ -1813,7 +1788,7 @@ class TestClientEndpointBridge(unittest.TestCase):
client_config.signature_version = 's3v4'
bridge = ClientEndpointBridge(resolver, client_config=client_config)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('s3v4', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3v4'
def test_uses_signature_version_from_scoped_config(self):
resolver = mock.Mock()
@@ -1824,7 +1799,7 @@ class TestClientEndpointBridge(unittest.TestCase):
scoped_config.get.return_value = {'signature_version': 's3'}
bridge = ClientEndpointBridge(resolver, scoped_config)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('s3', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3'
def test_uses_s3v4_over_s3_for_s3(self):
resolver = mock.Mock()
@@ -1833,7 +1808,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2', 'signatureVersions': ['s3v4', 's3']}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('s3', 'us-west-2')
- self.assertEqual('s3v4', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3v4'
def test_uses_s3v4_over_others_for_s3(self):
resolver = mock.Mock()
@@ -1842,7 +1817,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2', 'signatureVersions': ['s3v4', 'v4']}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('s3', 'us-west-2')
- self.assertEqual('s3v4', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3v4'
def test_uses_v4_over_other_signers(self):
resolver = mock.Mock()
@@ -1851,7 +1826,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'signatureVersions': ['v2', 'v4'], 'endpointName': 'us-west-2'}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('v4', resolved['signature_version'])
+ assert resolved['signature_version'] == 'v4'
def test_uses_known_signers_from_list_of_signature_versions(self):
resolver = mock.Mock()
@@ -1861,7 +1836,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2'}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('v3https', resolved['signature_version'])
+ assert resolved['signature_version'] == 'v3https'
def test_raises_when_signature_version_is_unknown(self):
resolver = mock.Mock()
@@ -1869,7 +1844,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'partition': 'aws', 'hostname': 'test',
'endpointName': 'us-west-2', 'signatureVersions': ['foo']}
bridge = ClientEndpointBridge(resolver)
- with self.assertRaises(UnknownSignatureVersionError):
+ with pytest.raises(UnknownSignatureVersionError):
bridge.resolve('test', 'us-west-2')
def test_uses_first_known_signature_version(self):
@@ -1880,7 +1855,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'signatureVersions': ['foo', 'bar', 'baz', 's3v4', 'v2']}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('s3v4', resolved['signature_version'])
+ assert resolved['signature_version'] == 's3v4'
def test_raises_when_signature_version_is_not_found(self):
resolver = mock.Mock()
@@ -1888,7 +1863,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'partition': 'aws', 'hostname': 'test',
'endpointName': 'us-west-2'}
bridge = ClientEndpointBridge(resolver)
- with self.assertRaises(UnknownSignatureVersionError):
+ with pytest.raises(UnknownSignatureVersionError):
bridge.resolve('test', 'us-west-2')
def test_uses_service_name_as_signing_name(self):
@@ -1899,7 +1874,7 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2'}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('test', resolved['signing_name'])
+ assert resolved['signing_name'] == 'test'
def test_uses_credential_scope_signing_name(self):
resolver = mock.Mock()
@@ -1912,7 +1887,7 @@ class TestClientEndpointBridge(unittest.TestCase):
}
bridge = ClientEndpointBridge(resolver)
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('override', resolved['signing_name'])
+ assert resolved['signing_name'] == 'override'
def test_uses_service_signing_name_when_present_and_no_cred_scope(self):
resolver = mock.Mock()
@@ -1922,23 +1897,19 @@ class TestClientEndpointBridge(unittest.TestCase):
'endpointName': 'us-west-2'}
bridge = ClientEndpointBridge(resolver, service_signing_name='foo')
resolved = bridge.resolve('test', 'us-west-2')
- self.assertEqual('foo', resolved['signing_name'])
+ assert resolved['signing_name'] == 'foo'
def test_can_construct_dualstack_endpoint_when_enabled(self):
scoped_config = {'s3': {'use_dualstack_endpoint': True}}
bridge = ClientEndpointBridge(self.resolver, scoped_config)
resolved = bridge.resolve('s3', 'us-east-1')
- self.assertEqual(
- resolved['endpoint_url'],
- 'https://s3.dualstack.us-east-1.amazonaws.com')
+ assert resolved['endpoint_url'] == 'https://s3.dualstack.us-east-1.amazonaws.com'
def test_dualstack_can_use_client_config(self):
config = botocore.config.Config(s3={'use_dualstack_endpoint': True})
bridge = ClientEndpointBridge(self.resolver, client_config=config)
resolved = bridge.resolve('s3', 'us-east-1')
- self.assertEqual(
- resolved['endpoint_url'],
- 'https://s3.dualstack.us-east-1.amazonaws.com')
+ assert resolved['endpoint_url'] == 'https://s3.dualstack.us-east-1.amazonaws.com'
def test_dualstack_client_config_beats_scoped_config(self):
scoped_config = {'s3': {'use_dualstack_endpoint': False}}
@@ -1946,9 +1917,7 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(self.resolver, scoped_config,
client_config=config)
resolved = bridge.resolve('s3', 'us-east-1')
- self.assertEqual(
- resolved['endpoint_url'],
- 'https://s3.dualstack.us-east-1.amazonaws.com')
+ assert resolved['endpoint_url'] == 'https://s3.dualstack.us-east-1.amazonaws.com'
def test_disable_dualstack_explicitly(self):
scoped_config = {'s3': {'use_dualstack_endpoint': True}}
@@ -1956,9 +1925,7 @@ class TestClientEndpointBridge(unittest.TestCase):
bridge = ClientEndpointBridge(self.resolver, scoped_config,
client_config=config)
resolved = bridge.resolve('s3', 'us-east-1')
- self.assertEqual(
- resolved['endpoint_url'],
- 'https://s3.amazonaws.com')
+ assert resolved['endpoint_url'] == 'https://s3.amazonaws.com'
def test_dualstack_honors_dns_suffix(self):
scoped_config = {'s3': {'use_dualstack_endpoint': True}}
@@ -1966,7 +1933,4 @@ class TestClientEndpointBridge(unittest.TestCase):
self.boilerplate_response['endpointName'] = 'cn-north-1'
bridge = ClientEndpointBridge(self.resolver, scoped_config)
resolved = bridge.resolve('s3', 'cn-north-1')
- self.assertEqual(
- resolved['endpoint_url'],
- 'https://s3.dualstack.cn-north-1.amazonaws.com.cn'
- )
+ assert resolved['endpoint_url'] == 'https://s3.dualstack.cn-north-1.amazonaws.com.cn'
diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py
index 21352ac8..a4e1066f 100644
--- a/tests/unit/test_compat.py
+++ b/tests/unit/test_compat.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import datetime
from tests import mock
+import pytest
from botocore.exceptions import MD5UnavailableError
from botocore.compat import (
@@ -25,11 +26,11 @@ class TotalSecondsTest(BaseEnvVar):
def test_total_seconds(self):
delta = datetime.timedelta(days=1, seconds=45)
remaining = total_seconds(delta)
- self.assertEqual(remaining, 86445.0)
+ assert remaining == 86445.0
delta = datetime.timedelta(seconds=33, microseconds=772)
remaining = total_seconds(delta)
- self.assertEqual(remaining, 33.000772)
+ assert remaining == 33.000772
class TestUnquoteStr(unittest.TestCase):
@@ -37,49 +38,49 @@ class TestUnquoteStr(unittest.TestCase):
value = u'%E2%9C%93'
# Note: decoded to unicode and utf-8 decoded as well.
# This would work in python2 and python3.
- self.assertEqual(unquote_str(value), u'\u2713')
+ assert unquote_str(value) == u'\u2713'
def test_unquote_normal(self):
value = u'foo'
# Note: decoded to unicode and utf-8 decoded as well.
# This would work in python2 and python3.
- self.assertEqual(unquote_str(value), u'foo')
+ assert unquote_str(value) == u'foo'
def test_unquote_with_spaces(self):
value = u'foo+bar'
# Note: decoded to unicode and utf-8 decoded as well.
# This would work in python2 and python3.
- self.assertEqual(unquote_str(value), 'foo bar')
+ assert unquote_str(value) == 'foo bar'
class TestEnsureBytes(unittest.TestCase):
def test_string(self):
value = 'foo'
response = ensure_bytes(value)
- self.assertIsInstance(response, six.binary_type)
- self.assertEqual(response, b'foo')
+ assert isinstance(response, six.binary_type)
+ assert response == b'foo'
def test_binary(self):
value = b'bar'
response = ensure_bytes(value)
- self.assertIsInstance(response, six.binary_type)
- self.assertEqual(response, b'bar')
+ assert isinstance(response, six.binary_type)
+ assert response == b'bar'
def test_unicode(self):
value = u'baz'
response = ensure_bytes(value)
- self.assertIsInstance(response, six.binary_type)
- self.assertEqual(response, b'baz')
+ assert isinstance(response, six.binary_type)
+ assert response == b'baz'
def test_non_ascii(self):
value = u'\u2713'
response = ensure_bytes(value)
- self.assertIsInstance(response, six.binary_type)
- self.assertEqual(response, b'\xe2\x9c\x93')
+ assert isinstance(response, six.binary_type)
+ assert response == b'\xe2\x9c\x93'
def test_non_string_or_bytes_raises_error(self):
value = 500
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
ensure_bytes(value)
@@ -88,11 +89,11 @@ class TestGetMD5(unittest.TestCase):
md5 = mock.Mock()
with mock.patch('botocore.compat.MD5_AVAILABLE', True):
with mock.patch('hashlib.md5', mock.Mock(return_value=md5)):
- self.assertEqual(get_md5(), md5)
+ assert get_md5() == md5
def test_unavailable_raises_error(self):
with mock.patch('botocore.compat.MD5_AVAILABLE', False):
- with self.assertRaises(MD5UnavailableError):
+ with pytest.raises(MD5UnavailableError):
get_md5()
@@ -121,10 +122,9 @@ class TestCompatShellSplit(unittest.TestCase):
r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
}
for input_string, expected_output in windows_cases.items():
- self.assertEqual(compat_shell_split(input_string, "win32"),
- expected_output)
+ assert compat_shell_split(input_string, "win32") == expected_output
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
compat_shell_split(r'"', "win32")
def test_compat_shell_split_unix(self):
@@ -151,21 +151,19 @@ class TestCompatShellSplit(unittest.TestCase):
r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
}
for input_string, expected_output in unix_cases.items():
- self.assertEqual(compat_shell_split(input_string, "linux2"),
- expected_output)
- self.assertEqual(compat_shell_split(input_string, "darwin"),
- expected_output)
+ assert compat_shell_split(input_string, "linux2") == expected_output
+ assert compat_shell_split(input_string, "darwin") == expected_output
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
compat_shell_split(r'"', "linux2")
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
compat_shell_split(r'"', "darwin")
class TestTimezoneOperations(unittest.TestCase):
def test_get_tzinfo_options(self):
options = get_tzinfo_options()
- self.assertGreater(len(options), 0)
+ assert len(options) > 0
for tzinfo in options:
- self.assertIsInstance(tzinfo(), datetime.tzinfo)
+ assert isinstance(tzinfo(), datetime.tzinfo)
diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py
index 04ff258b..4f197c95 100644
--- a/tests/unit/test_config_provider.py
+++ b/tests/unit/test_config_provider.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest
from tests import mock
+import pytest
import botocore
import botocore.session as session
@@ -38,7 +39,7 @@ class TestConfigChainFactory(unittest.TestCase):
**create_config_chain_args
)
value = chain.provide()
- self.assertEqual(value, expected_value)
+ assert value == expected_value
def test_chain_builder_can_provide_instance(self):
self.assert_chain_does_provide(
@@ -292,7 +293,7 @@ class TestConfigValueStore(unittest.TestCase):
def test_does_provide_none_if_no_variable_exists(self):
provider = ConfigValueStore()
value = provider.get_config_variable('fake_variable')
- self.assertIsNone(value)
+ assert value is None
def test_does_provide_value_if_variable_exists(self):
mock_value_provider = mock.Mock(spec=BaseProvider)
@@ -301,13 +302,13 @@ class TestConfigValueStore(unittest.TestCase):
'fake_variable': mock_value_provider,
})
value = provider.get_config_variable('fake_variable')
- self.assertEqual(value, 'foo')
+ assert value == 'foo'
def test_can_set_variable(self):
provider = ConfigValueStore()
provider.set_config_variable('fake_variable', 'foo')
value = provider.get_config_variable('fake_variable')
- self.assertEqual(value, 'foo')
+ assert value == 'foo'
def test_can_set_config_provider(self):
foo_value_provider = mock.Mock(spec=BaseProvider)
@@ -317,14 +318,14 @@ class TestConfigValueStore(unittest.TestCase):
})
value = provider.get_config_variable('fake_variable')
- self.assertEqual(value, 'foo')
+ assert value == 'foo'
bar_value_provider = mock.Mock(spec=BaseProvider)
bar_value_provider.provide.return_value = 'bar'
provider.set_config_provider('fake_variable', bar_value_provider)
value = provider.get_config_variable('fake_variable')
- self.assertEqual(value, 'bar')
+ assert value == 'bar'
class TestInstanceVarProvider(unittest.TestCase):
@@ -337,7 +338,7 @@ class TestInstanceVarProvider(unittest.TestCase):
session=fake_session,
)
value = provider.provide()
- self.assertEqual(value, expected_value)
+ assert value == expected_value
def test_can_provide_value(self):
self.assert_provides_value(
@@ -358,7 +359,7 @@ class TestEnvironmentProvider(unittest.TestCase):
def assert_does_provide(self, env, name, expected_value):
provider = EnvironmentProvider(name=name, env=env)
value = provider.provide()
- self.assertEqual(value, expected_value)
+ assert value == expected_value
def test_does_provide_none_if_no_variable_exists(self):
self.assert_does_provide(
@@ -387,7 +388,7 @@ class TestScopedConfigProvider(unittest.TestCase):
session=fake_session,
)
value = property_provider.provide()
- self.assertEqual(value, expected_value)
+ assert value == expected_value
def test_can_provide_value(self):
self.assert_provides_value(
@@ -450,26 +451,26 @@ def assert_chain_does_provide(providers, expected_value):
assert value == expected_value
-def test_chain_provider():
+@pytest.mark.parametrize("expected,return_values", [
+ (None, []),
+ (None, [None]),
+ ('foo', ['foo']),
+ ('foo', ['foo', 'bar']),
+ ('bar', [None, 'bar']),
+ ('foo', ['foo', None]),
+ ('baz', [None, None, 'baz']),
+ ('bar', [None, 'bar', None]),
+ ('foo', ['foo', 'bar', None]),
+ ('foo', ['foo', 'bar', 'baz']),
+])
+def test_chain_provider(expected, return_values):
# Each case is a tuple with the first element being the expected return
# value form the ChainProvider. The second value being a list of return
# values from the individual providers that are in the chain.
- cases = [
- (None, []),
- (None, [None]),
- ('foo', ['foo']),
- ('foo', ['foo', 'bar']),
- ('bar', [None, 'bar']),
- ('foo', ['foo', None]),
- ('baz', [None, None, 'baz']),
- ('bar', [None, 'bar', None]),
- ('foo', ['foo', 'bar', None]),
- ('foo', ['foo', 'bar', 'baz']),
- ]
- for case in cases:
- assert_chain_does_provide(
- _make_providers_that_return(case[1]),
- case[0])
+
+ assert_chain_does_provide(
+ _make_providers_that_return(return_values),
+ expected)
class TestChainProvider(unittest.TestCase):
@@ -479,15 +480,15 @@ class TestChainProvider(unittest.TestCase):
conversion_func=int,
)
value = chain_provider.provide()
- self.assertIsInstance(value, int)
- self.assertEqual(value, 1)
+ assert isinstance(value, int)
+ assert value == 1
class TestConstantProvider(unittest.TestCase):
def test_can_provide_value(self):
provider = ConstantProvider(value='foo')
value = provider.provide()
- self.assertEqual(value, 'foo')
+ assert value == 'foo'
class TestSectionConfigProvider(unittest.TestCase):
@@ -501,7 +502,7 @@ class TestSectionConfigProvider(unittest.TestCase):
override_providers=override_providers
)
value = provider.provide()
- self.assertEqual(value, expected_value)
+ assert value == expected_value
def test_provide_section_config(self):
self.assert_provides_value(
diff --git a/tests/unit/test_configloader.py b/tests/unit/test_configloader.py
index 14ce817f..dd62a118 100644
--- a/tests/unit/test_configloader.py
+++ b/tests/unit/test_configloader.py
@@ -17,6 +17,7 @@ import os
from tests import mock
import tempfile
import shutil
+import pytest
import botocore.exceptions
from botocore.configloader import raw_config_parse, load_config, \
@@ -59,12 +60,12 @@ class TestConfigLoader(BaseEnvVar):
return full_path
def test_config_not_found(self):
- with self.assertRaises(botocore.exceptions.ConfigNotFound):
+ with pytest.raises(botocore.exceptions.ConfigNotFound):
loaded_config = raw_config_parse(path('aws_config_notfound'))
def test_config_parse_error(self):
filename = path('aws_config_bad')
- with self.assertRaises(botocore.exceptions.ConfigParseError):
+ with pytest.raises(botocore.exceptions.ConfigParseError):
raw_config_parse(filename)
def test_config_parse_error_bad_unicode(self):
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index 791b7452..1c8d914f 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -19,6 +19,7 @@ import tempfile
import shutil
import json
import copy
+import pytest
from dateutil.tz import tzlocal, tzutc
@@ -73,8 +74,8 @@ def path(filename):
class TestCredentials(BaseEnvVar):
def _ensure_credential_is_normalized_as_unicode(self, access, secret):
c = credentials.Credentials(access, secret)
- self.assertTrue(isinstance(c.access_key, type(u'u')))
- self.assertTrue(isinstance(c.secret_key, type(u'u')))
+ assert isinstance(c.access_key, str)
+ assert isinstance(c.secret_key, str)
def test_detect_nonascii_character(self):
self._ensure_credential_is_normalized_as_unicode(
@@ -112,64 +113,64 @@ class TestRefreshableCredentials(TestCredentials):
# say the current time is utcnow(), then we should need
# a refresh.
self.mock_time.return_value = datetime.now(tzlocal())
- self.assertTrue(self.creds.refresh_needed())
+ assert self.creds.refresh_needed()
# We should refresh creds, if we try to access "access_key"
# or any of the cred vars.
- self.assertEqual(self.creds.access_key, 'NEW-ACCESS')
- self.assertEqual(self.creds.secret_key, 'NEW-SECRET')
- self.assertEqual(self.creds.token, 'NEW-TOKEN')
+ assert self.creds.access_key == 'NEW-ACCESS'
+ assert self.creds.secret_key == 'NEW-SECRET'
+ assert self.creds.token == 'NEW-TOKEN'
def test_no_expiration(self):
creds = credentials.RefreshableCredentials(
'ORIGINAL-ACCESS', 'ORIGINAL-SECRET', 'ORIGINAL-TOKEN',
None, self.refresher, 'iam-role', time_fetcher=self.mock_time
)
- self.assertFalse(creds.refresh_needed())
+ assert not creds.refresh_needed()
def test_no_refresh_needed(self):
# The expiry time was 30 minutes ago, let's say it's an hour
# ago currently. That would mean we don't need a refresh.
self.mock_time.return_value = (
datetime.now(tzlocal()) - timedelta(minutes=60))
- self.assertTrue(not self.creds.refresh_needed())
+ assert not self.creds.refresh_needed()
- self.assertEqual(self.creds.access_key, 'ORIGINAL-ACCESS')
- self.assertEqual(self.creds.secret_key, 'ORIGINAL-SECRET')
- self.assertEqual(self.creds.token, 'ORIGINAL-TOKEN')
+ assert self.creds.access_key == 'ORIGINAL-ACCESS'
+ assert self.creds.secret_key == 'ORIGINAL-SECRET'
+ assert self.creds.token == 'ORIGINAL-TOKEN'
def test_get_credentials_set(self):
# We need to return a consistent set of credentials to use during the
# signing process.
self.mock_time.return_value = (
datetime.now(tzlocal()) - timedelta(minutes=60))
- self.assertTrue(not self.creds.refresh_needed())
+ assert not self.creds.refresh_needed()
credential_set = self.creds.get_frozen_credentials()
- self.assertEqual(credential_set.access_key, 'ORIGINAL-ACCESS')
- self.assertEqual(credential_set.secret_key, 'ORIGINAL-SECRET')
- self.assertEqual(credential_set.token, 'ORIGINAL-TOKEN')
+ assert credential_set.access_key == 'ORIGINAL-ACCESS'
+ assert credential_set.secret_key == 'ORIGINAL-SECRET'
+ assert credential_set.token == 'ORIGINAL-TOKEN'
def test_refresh_returns_empty_dict(self):
self.refresher.return_value = {}
self.mock_time.return_value = datetime.now(tzlocal())
- self.assertTrue(self.creds.refresh_needed())
+ assert self.creds.refresh_needed()
- with self.assertRaises(botocore.exceptions.CredentialRetrievalError):
+ with pytest.raises(botocore.exceptions.CredentialRetrievalError):
self.creds.access_key
def test_refresh_returns_none(self):
self.refresher.return_value = None
self.mock_time.return_value = datetime.now(tzlocal())
- self.assertTrue(self.creds.refresh_needed())
+ assert self.creds.refresh_needed()
- with self.assertRaises(botocore.exceptions.CredentialRetrievalError):
+ with pytest.raises(botocore.exceptions.CredentialRetrievalError):
self.creds.access_key
def test_refresh_returns_partial_credentials(self):
self.refresher.return_value = {'access_key': 'akid'}
self.mock_time.return_value = datetime.now(tzlocal())
- self.assertTrue(self.creds.refresh_needed())
+ assert self.creds.refresh_needed()
- with self.assertRaises(botocore.exceptions.CredentialRetrievalError):
+ with pytest.raises(botocore.exceptions.CredentialRetrievalError):
self.creds.access_key
@@ -199,7 +200,7 @@ class TestDeferredRefreshableCredentials(unittest.TestCase):
# Now that the object has been accessed, it should have called the
# refresher
creds.get_frozen_credentials()
- self.assertEqual(self.refresher.call_count, 1)
+ assert self.refresher.call_count == 1
def test_refresh_only_called_once(self):
creds = credentials.DeferredRefreshableCredentials(
@@ -211,7 +212,7 @@ class TestDeferredRefreshableCredentials(unittest.TestCase):
# The credentials were accessed several times in a row, but only
# should call refresh once.
- self.assertEqual(self.refresher.call_count, 1)
+ assert self.refresher.call_count == 1
class TestAssumeRoleCredentialFetcher(BaseEnvVar):
@@ -262,7 +263,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
expected_response = self.get_expected_creds_from_response(response)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected_response)
+ assert response == expected_response
def test_expiration_in_datetime_format(self):
response = {
@@ -286,7 +287,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
expected_response = self.get_expected_creds_from_response(response)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected_response)
+ assert response == expected_response
def test_retrieves_from_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
@@ -314,7 +315,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected_response)
+ assert response == expected_response
client_creator.assert_not_called()
def test_cache_key_is_windows_safe(self):
@@ -341,8 +342,8 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
cache_key = (
'75c539f0711ba78c5b9e488d0add95f178a54d74'
)
- self.assertIn(cache_key, cache)
- self.assertEqual(cache[cache_key], response)
+ assert cache_key in cache
+ assert cache[cache_key] == response
def test_cache_key_with_role_session_name(self):
response = {
@@ -367,8 +368,8 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
cache_key = (
'2964201f5648c8be5b9460a9cf842d73a266daf2'
)
- self.assertIn(cache_key, cache)
- self.assertEqual(cache[cache_key], response)
+ assert cache_key in cache
+ assert cache[cache_key] == response
def test_cache_key_with_policy(self):
response = {
@@ -402,8 +403,8 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
cache_key = (
'176f223d915e82456c253545e192aa21d68f5ab8'
)
- self.assertIn(cache_key, cache)
- self.assertEqual(cache[cache_key], response)
+ assert cache_key in cache
+ assert cache[cache_key] == response
def test_assume_role_in_cache_but_expired(self):
response = {
@@ -432,7 +433,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
expected = self.get_expected_creds_from_response(response)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected)
+ assert response == expected
def test_role_session_name_can_be_provided(self):
response = {
@@ -597,7 +598,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
client = client_creator.return_value
assume_role_calls = client.assume_role.call_args_list
- self.assertEqual(len(assume_role_calls), 2, assume_role_calls)
+ assert len(assume_role_calls) == 2, assume_role_calls
def test_mfa_refresh_enabled(self):
responses = [{
@@ -646,7 +647,7 @@ class TestAssumeRoleCredentialFetcher(BaseEnvVar):
'TokenCode': token_code
}
]
- self.assertEqual(calls, expected_calls)
+ assert calls == expected_calls
class TestAssumeRoleWithWebIdentityCredentialFetcher(BaseEnvVar):
@@ -698,7 +699,7 @@ class TestAssumeRoleWithWebIdentityCredentialFetcher(BaseEnvVar):
expected_response = self.get_expected_creds_from_response(response)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected_response)
+ assert response == expected_response
def test_retrieves_from_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
@@ -725,7 +726,7 @@ class TestAssumeRoleWithWebIdentityCredentialFetcher(BaseEnvVar):
)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected_response)
+ assert response == expected_response
client_creator.assert_not_called()
def test_assume_role_in_cache_but_expired(self):
@@ -755,7 +756,7 @@ class TestAssumeRoleWithWebIdentityCredentialFetcher(BaseEnvVar):
expected = self.get_expected_creds_from_response(response)
response = refresher.fetch_credentials()
- self.assertEqual(response, expected)
+ assert response == expected
class TestAssumeRoleWithWebIdentityCredentialProvider(unittest.TestCase):
@@ -815,9 +816,9 @@ class TestAssumeRoleWithWebIdentityCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
mock_loader_cls.assert_called_with('/some/path/token.jwt')
def test_assume_role_retrieves_from_cache(self):
@@ -849,9 +850,9 @@ class TestAssumeRoleWithWebIdentityCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo-cached')
- self.assertEqual(creds.secret_key, 'bar-cached')
- self.assertEqual(creds.token, 'baz-cached')
+ assert creds.access_key == 'foo-cached'
+ assert creds.secret_key == 'bar-cached'
+ assert creds.token == 'baz-cached'
client_creator.assert_not_called()
def test_assume_role_in_cache_but_expired(self):
@@ -887,9 +888,9 @@ class TestAssumeRoleWithWebIdentityCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
mock_loader_cls.assert_called_with('/some/path/token.jwt')
def test_role_session_name_provided(self):
@@ -931,7 +932,7 @@ class TestAssumeRoleWithWebIdentityCredentialProvider(unittest.TestCase):
profile_name=self.profile_name,
)
# If the role arn isn't set but the token path is raise an error
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
@@ -944,10 +945,10 @@ class TestEnvVar(BaseEnvVar):
}
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.method, 'env')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.method == 'env'
def test_envvars_found_with_security_token(self):
environ = {
@@ -957,11 +958,11 @@ class TestEnvVar(BaseEnvVar):
}
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
- self.assertEqual(creds.method, 'env')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
+ assert creds.method == 'env'
def test_envvars_found_with_session_token(self):
environ = {
@@ -972,15 +973,15 @@ class TestEnvVar(BaseEnvVar):
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
- self.assertEqual(creds.method, 'env')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
+ assert creds.method == 'env'
def test_envvars_not_found(self):
provider = credentials.EnvProvider(environ={})
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_envvars_empty_string(self):
environ = {
@@ -990,7 +992,7 @@ class TestEnvVar(BaseEnvVar):
}
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_expiry_omitted_if_envvar_empty(self):
environ = {
@@ -1004,10 +1006,10 @@ class TestEnvVar(BaseEnvVar):
# Because we treat empty env vars the same as not being provided,
# we should return static credentials and not a refreshable
# credential.
- self.assertNotIsInstance(creds, credentials.RefreshableCredentials)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert not isinstance(creds, credentials.RefreshableCredentials)
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_error_when_expiry_required_but_empty(self):
expiry_time = datetime.now(tzlocal()) - timedelta(hours=1)
@@ -1021,7 +1023,7 @@ class TestEnvVar(BaseEnvVar):
del environ['AWS_CREDENTIAL_EXPIRATION']
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
creds.get_frozen_credentials()
def test_can_override_env_var_mapping(self):
@@ -1041,9 +1043,9 @@ class TestEnvVar(BaseEnvVar):
environ, mapping
)
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_can_override_partial_env_var_mapping(self):
# Only changing the access key mapping.
@@ -1059,9 +1061,9 @@ class TestEnvVar(BaseEnvVar):
environ, {'access_key': 'FOO_ACCESS_KEY'}
)
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_can_override_expiry_env_var_mapping(self):
expiry_time = datetime.now(tzlocal()) - timedelta(hours=1)
@@ -1083,7 +1085,7 @@ class TestEnvVar(BaseEnvVar):
"Credentials were refreshed, but the refreshed credentials are "
"still expired."
)
- with six.assertRaisesRegex(self, RuntimeError, error_message):
+ with pytest.raises(RuntimeError, match=error_message):
creds.get_frozen_credentials()
def test_partial_creds_is_an_error(self):
@@ -1095,7 +1097,7 @@ class TestEnvVar(BaseEnvVar):
# Missing the AWS_SECRET_ACCESS_KEY
}
provider = credentials.EnvProvider(environ)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
def test_partial_creds_is_an_error_empty_string(self):
@@ -1107,7 +1109,7 @@ class TestEnvVar(BaseEnvVar):
'AWS_SECRET_ACCESS_KEY': '',
}
provider = credentials.EnvProvider(environ)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
def test_missing_access_key_id_raises_error(self):
@@ -1125,7 +1127,7 @@ class TestEnvVar(BaseEnvVar):
# Since the credentials are expired, we'll trigger a refresh
# whenever we try to access them. At that refresh time, the relevant
# environment variables are incomplete, so an error will be raised.
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
creds.get_frozen_credentials()
def test_credentials_refresh(self):
@@ -1139,7 +1141,7 @@ class TestEnvVar(BaseEnvVar):
}
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertIsInstance(creds, credentials.RefreshableCredentials)
+ assert isinstance(creds, credentials.RefreshableCredentials)
# Since the credentials are expired, we'll trigger a refresh whenever
# we try to access them. But at this point the environment hasn't been
@@ -1149,7 +1151,7 @@ class TestEnvVar(BaseEnvVar):
"Credentials were refreshed, but the refreshed credentials are "
"still expired."
)
- with six.assertRaisesRegex(self, RuntimeError, error_message):
+ with pytest.raises(RuntimeError, match=error_message):
creds.get_frozen_credentials()
# Now we update the environment with non-expired credentials,
@@ -1163,9 +1165,9 @@ class TestEnvVar(BaseEnvVar):
})
frozen = creds.get_frozen_credentials()
- self.assertEqual(frozen.access_key, 'bin')
- self.assertEqual(frozen.secret_key, 'bam')
- self.assertEqual(frozen.token, 'biz')
+ assert frozen.access_key == 'bin'
+ assert frozen.secret_key == 'bam'
+ assert frozen.token == 'biz'
def test_credentials_only_refresh_when_needed(self):
expiry_time = datetime.now(tzlocal()) + timedelta(hours=2)
@@ -1193,9 +1195,9 @@ class TestEnvVar(BaseEnvVar):
})
frozen = creds.get_frozen_credentials()
- self.assertEqual(frozen.access_key, 'foo')
- self.assertEqual(frozen.secret_key, 'bar')
- self.assertEqual(frozen.token, 'baz')
+ assert frozen.access_key == 'foo'
+ assert frozen.secret_key == 'bar'
+ assert frozen.token == 'baz'
def test_credentials_not_refreshable_if_no_expiry_present(self):
environ = {
@@ -1205,8 +1207,8 @@ class TestEnvVar(BaseEnvVar):
}
provider = credentials.EnvProvider(environ)
creds = provider.load()
- self.assertNotIsInstance(creds, credentials.RefreshableCredentials)
- self.assertIsInstance(creds, credentials.Credentials)
+ assert not isinstance(creds, credentials.RefreshableCredentials)
+ assert isinstance(creds, credentials.Credentials)
def test_credentials_do_not_become_refreshable(self):
environ = {
@@ -1217,9 +1219,9 @@ class TestEnvVar(BaseEnvVar):
provider = credentials.EnvProvider(environ)
creds = provider.load()
frozen = creds.get_frozen_credentials()
- self.assertEqual(frozen.access_key, 'foo')
- self.assertEqual(frozen.secret_key, 'bar')
- self.assertEqual(frozen.token, 'baz')
+ assert frozen.access_key == 'foo'
+ assert frozen.secret_key == 'bar'
+ assert frozen.token == 'baz'
expiry_time = datetime.now(tzlocal()) - timedelta(hours=1)
environ.update({
@@ -1230,10 +1232,10 @@ class TestEnvVar(BaseEnvVar):
})
frozen = creds.get_frozen_credentials()
- self.assertEqual(frozen.access_key, 'foo')
- self.assertEqual(frozen.secret_key, 'bar')
- self.assertEqual(frozen.token, 'baz')
- self.assertNotIsInstance(creds, credentials.RefreshableCredentials)
+ assert frozen.access_key == 'foo'
+ assert frozen.secret_key == 'bar'
+ assert frozen.token == 'baz'
+ assert not isinstance(creds, credentials.RefreshableCredentials)
def test_credentials_throw_error_if_expiry_goes_away(self):
expiry_time = datetime.now(tzlocal()) - timedelta(hours=1)
@@ -1247,7 +1249,7 @@ class TestEnvVar(BaseEnvVar):
del environ['AWS_CREDENTIAL_EXPIRATION']
- with self.assertRaises(credentials.PartialCredentialsError):
+ with pytest.raises(credentials.PartialCredentialsError):
creds.get_frozen_credentials()
@@ -1267,11 +1269,11 @@ class TestSharedCredentialsProvider(BaseEnvVar):
creds_filename='~/.aws/creds', profile_name='default',
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'shared-credentials-file')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token is None
+ assert creds.method == 'shared-credentials-file'
def test_partial_creds_raise_error(self):
self.ini_parser.return_value = {
@@ -1283,7 +1285,7 @@ class TestSharedCredentialsProvider(BaseEnvVar):
provider = credentials.SharedCredentialProvider(
creds_filename='~/.aws/creds', profile_name='default',
ini_parser=self.ini_parser)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
def test_credentials_file_exists_with_session_token(self):
@@ -1298,11 +1300,11 @@ class TestSharedCredentialsProvider(BaseEnvVar):
creds_filename='~/.aws/creds', profile_name='default',
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
- self.assertEqual(creds.method, 'shared-credentials-file')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
+ assert creds.method == 'shared-credentials-file'
def test_credentials_file_with_multiple_profiles(self):
self.ini_parser.return_value = {
@@ -1323,11 +1325,11 @@ class TestSharedCredentialsProvider(BaseEnvVar):
creds_filename='~/.aws/creds', profile_name='dev',
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'd')
- self.assertEqual(creds.secret_key, 'e')
- self.assertEqual(creds.token, 'f')
- self.assertEqual(creds.method, 'shared-credentials-file')
+ assert creds is not None
+ assert creds.access_key == 'd'
+ assert creds.secret_key == 'e'
+ assert creds.token == 'f'
+ assert creds.method == 'shared-credentials-file'
def test_credentials_file_does_not_exist_returns_none(self):
# It's ok if the credentials file does not exist, we should
@@ -1338,7 +1340,7 @@ class TestSharedCredentialsProvider(BaseEnvVar):
creds_filename='~/.aws/creds', profile_name='dev',
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
class TestConfigFileProvider(BaseEnvVar):
@@ -1362,11 +1364,11 @@ class TestConfigFileProvider(BaseEnvVar):
provider = credentials.ConfigProvider('cli.cfg', 'default',
self.parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
- self.assertEqual(creds.method, 'config-file')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
+ assert creds.method == 'config-file'
def test_config_file_missing_profile_config(self):
# Referring to a profile that's not in the config file
@@ -1375,7 +1377,7 @@ class TestConfigFileProvider(BaseEnvVar):
provider = credentials.ConfigProvider('cli.cfg', profile_name,
self.parser)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_config_file_errors_ignored(self):
# We should move on to the next provider if the config file
@@ -1385,7 +1387,7 @@ class TestConfigFileProvider(BaseEnvVar):
provider = credentials.ConfigProvider('cli.cfg', 'default',
self.parser)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_partial_creds_is_error(self):
profile_config = {
@@ -1396,7 +1398,7 @@ class TestConfigFileProvider(BaseEnvVar):
parser = mock.Mock()
parser.return_value = parsed
provider = credentials.ConfigProvider('cli.cfg', 'default', parser)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
@@ -1418,11 +1420,11 @@ class TestBotoProvider(BaseEnvVar):
provider = credentials.BotoProvider(environ=environ,
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'boto-config')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token is None
+ assert creds.method == 'boto-config'
def test_env_var_set_for_boto_location(self):
environ = {
@@ -1439,11 +1441,11 @@ class TestBotoProvider(BaseEnvVar):
provider = credentials.BotoProvider(environ=environ,
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'boto-config')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token is None
+ assert creds.method == 'boto-config'
# Assert that the parser was called with the filename specified
# in the env var.
@@ -1455,7 +1457,7 @@ class TestBotoProvider(BaseEnvVar):
provider = credentials.BotoProvider(environ={},
ini_parser=self.ini_parser)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_partial_creds_is_error(self):
ini_parser = mock.Mock()
@@ -1467,7 +1469,7 @@ class TestBotoProvider(BaseEnvVar):
}
provider = credentials.BotoProvider(environ={},
ini_parser=ini_parser)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
@@ -1476,7 +1478,7 @@ class TestOriginalEC2Provider(BaseEnvVar):
def test_load_ec2_credentials_file_not_exist(self):
provider = credentials.OriginalEC2Provider(environ={})
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_load_ec2_credentials_file_exists(self):
environ = {
@@ -1490,11 +1492,11 @@ class TestOriginalEC2Provider(BaseEnvVar):
provider = credentials.OriginalEC2Provider(environ=environ,
parser=parser)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'ec2-credentials-file')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token is None
+ assert creds.method == 'ec2-credentials-file'
class TestInstanceMetadataProvider(BaseEnvVar):
@@ -1512,11 +1514,11 @@ class TestInstanceMetadataProvider(BaseEnvVar):
provider = credentials.InstanceMetadataProvider(
iam_role_fetcher=fetcher)
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
- self.assertEqual(creds.method, 'iam-role')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
+ assert creds.method == 'iam-role'
def test_no_role_creds_exist(self):
fetcher = mock.Mock()
@@ -1524,7 +1526,7 @@ class TestInstanceMetadataProvider(BaseEnvVar):
provider = credentials.InstanceMetadataProvider(
iam_role_fetcher=fetcher)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
fetcher.retrieve_iam_role_credentials.assert_called_with()
@@ -1543,18 +1545,18 @@ class CredentialResolverTest(BaseEnvVar):
self.provider1.load.return_value = self.fake_creds
resolver = credentials.CredentialResolver(providers=[self.provider1])
creds = resolver.load_credentials()
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
def test_get_provider_by_name(self):
resolver = credentials.CredentialResolver(providers=[self.provider1])
result = resolver.get_provider('provider1')
- self.assertIs(result, self.provider1)
+ assert result is self.provider1
def test_get_unknown_provider_raises_error(self):
resolver = credentials.CredentialResolver(providers=[self.provider1])
- with self.assertRaises(botocore.exceptions.UnknownCredentialError):
+ with pytest.raises(botocore.exceptions.UnknownCredentialError):
resolver.get_provider('unknown-foo')
def test_first_credential_non_none_wins(self):
@@ -1563,9 +1565,9 @@ class CredentialResolverTest(BaseEnvVar):
resolver = credentials.CredentialResolver(providers=[self.provider1,
self.provider2])
creds = resolver.load_credentials()
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
self.provider1.load.assert_called_with()
self.provider2.load.assert_called_with()
@@ -1575,7 +1577,7 @@ class CredentialResolverTest(BaseEnvVar):
resolver = credentials.CredentialResolver(providers=[self.provider1,
self.provider2])
creds = resolver.load_credentials()
- self.assertIsNone(creds)
+ assert creds is None
def test_inject_additional_providers_after_existing(self):
self.provider1.load.return_value = None
@@ -1594,16 +1596,16 @@ class CredentialResolverTest(BaseEnvVar):
resolver.insert_after('provider1', new_provider)
creds = resolver.load_credentials()
- self.assertIsNotNone(creds)
+ assert creds is not None
- self.assertEqual(creds.access_key, 'd')
- self.assertEqual(creds.secret_key, 'e')
- self.assertEqual(creds.token, 'f')
+ assert creds.access_key == 'd'
+ assert creds.secret_key == 'e'
+ assert creds.token == 'f'
# Provider 1 should have been called, but provider2 should
# *not* have been called because new_provider already returned
# a non-None response.
self.provider1.load.assert_called_with()
- self.assertTrue(not self.provider2.called)
+ assert not self.provider2.called
def test_inject_provider_before_existing(self):
new_provider = mock.Mock()
@@ -1614,9 +1616,9 @@ class CredentialResolverTest(BaseEnvVar):
self.provider2])
resolver.insert_before(self.provider1.METHOD, new_provider)
creds = resolver.load_credentials()
- self.assertEqual(creds.access_key, 'x')
- self.assertEqual(creds.secret_key, 'y')
- self.assertEqual(creds.token, 'z')
+ assert creds.access_key == 'x'
+ assert creds.secret_key == 'y'
+ assert creds.token == 'z'
def test_can_remove_providers(self):
self.provider1.load.return_value = credentials.Credentials(
@@ -1627,11 +1629,11 @@ class CredentialResolverTest(BaseEnvVar):
self.provider2])
resolver.remove('provider1')
creds = resolver.load_credentials()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'd')
- self.assertEqual(creds.secret_key, 'e')
- self.assertEqual(creds.token, 'f')
- self.assertTrue(not self.provider1.load.called)
+ assert creds is not None
+ assert creds.access_key == 'd'
+ assert creds.secret_key == 'e'
+ assert creds.token == 'f'
+ assert not self.provider1.load.called
self.provider2.load.assert_called_with()
def test_provider_unknown(self):
@@ -1641,7 +1643,7 @@ class CredentialResolverTest(BaseEnvVar):
resolver.remove('providerFOO')
# But an error IS raised if you try to insert after an unknown
# provider.
- with self.assertRaises(botocore.exceptions.UnknownCredentialError):
+ with pytest.raises(botocore.exceptions.UnknownCredentialError):
resolver.insert_after('providerFoo', None)
@@ -1683,28 +1685,26 @@ class TestCreateCredentialResolver(BaseEnvVar):
def test_create_credential_resolver(self):
resolver = credentials.create_credential_resolver(self.session)
- self.assertIsInstance(resolver, credentials.CredentialResolver)
+ assert isinstance(resolver, credentials.CredentialResolver)
def test_explicit_profile_ignores_env_provider(self):
self.session.set_config_variable('profile', 'dev')
resolver = credentials.create_credential_resolver(self.session)
- self.assertTrue(
- all(not isinstance(p, EnvProvider) for p in resolver.providers))
+ assert all(not isinstance(p, EnvProvider) for p in resolver.providers)
def test_no_profile_checks_env_provider(self):
# If no profile is provided,
self.config_loader.set_config_variable('profile', None)
resolver = credentials.create_credential_resolver(self.session)
# Then an EnvProvider should be part of our credential lookup chain.
- self.assertTrue(
- any(isinstance(p, EnvProvider) for p in resolver.providers))
+ assert any(isinstance(p, EnvProvider) for p in resolver.providers)
def test_default_cache(self):
resolver = credentials.create_credential_resolver(self.session)
cache = resolver.get_provider('assume-role').cache
- self.assertIsInstance(cache, dict)
- self.assertEqual(cache, {})
+ assert isinstance(cache, dict)
+ assert cache == {}
def test_custom_cache(self):
custom_cache = credentials.JSONFileCache()
@@ -1712,7 +1712,7 @@ class TestCreateCredentialResolver(BaseEnvVar):
self.session, custom_cache
)
cache = resolver.get_provider('assume-role').cache
- self.assertIs(cache, custom_cache)
+ assert cache is custom_cache
class TestCanonicalNameSourceProvider(BaseEnvVar):
@@ -1732,7 +1732,7 @@ class TestCanonicalNameSourceProvider(BaseEnvVar):
])
self.custom_provider1.load.return_value = self.fake_creds
result = provider.source_credentials('CustomProvider1')
- self.assertIs(result, self.fake_creds)
+ assert result is self.fake_creds
def test_load_source_credentials_case_insensitive(self):
provider = credentials.CanonicalNameCredentialSourcer(providers=[
@@ -1740,12 +1740,12 @@ class TestCanonicalNameSourceProvider(BaseEnvVar):
])
self.custom_provider1.load.return_value = self.fake_creds
result = provider.source_credentials('cUsToMpRoViDeR1')
- self.assertIs(result, self.fake_creds)
+ assert result is self.fake_creds
def test_load_unknown_canonical_name_raises_error(self):
provider = credentials.CanonicalNameCredentialSourcer(providers=[
self.custom_provider1])
- with self.assertRaises(botocore.exceptions.UnknownCredentialError):
+ with pytest.raises(botocore.exceptions.UnknownCredentialError):
provider.source_credentials('CustomUnknown')
def _assert_assume_role_creds_returned_with_shared_file(self, provider):
@@ -1765,11 +1765,11 @@ class TestCanonicalNameSourceProvider(BaseEnvVar):
)
creds = source.source_credentials(provider.CANONICAL_NAME)
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
- self.assertFalse(provider.load.called)
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
+ assert not provider.load.called
def _assert_returns_creds_if_assume_role_not_used(self, provider):
assume_role_provider = mock.Mock(spec=AssumeRoleProvider)
@@ -1789,10 +1789,11 @@ class TestCanonicalNameSourceProvider(BaseEnvVar):
creds = source.source_credentials(provider.CANONICAL_NAME)
self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'd')
- self.assertEqual(creds.secret_key, 'e')
- self.assertEqual(creds.token, 'f')
- self.assertTrue(assume_role_provider.load.called)
+ assert creds is not None
+ assert creds.access_key == 'd'
+ assert creds.secret_key == 'e'
+ assert creds.token == 'f'
+ assert assume_role_provider.load.called
def test_assume_role_creds_returned_with_config_file(self):
provider = mock.Mock(spec=ConfigProvider)
@@ -1829,23 +1830,23 @@ class TestCanonicalNameSourceProvider(BaseEnvVar):
])
creds = provider.source_credentials('SharedConfig')
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
creds = provider.source_credentials('SharedCredentials')
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'a')
- self.assertEqual(creds.secret_key, 'b')
- self.assertEqual(creds.token, 'c')
+ assert creds is not None
+ assert creds.access_key == 'a'
+ assert creds.secret_key == 'b'
+ assert creds.token == 'c'
def test_get_canonical_shared_files_without_assume_role(self):
provider = credentials.CanonicalNameCredentialSourcer(
providers=[self.custom_provider1])
- with self.assertRaises(botocore.exceptions.UnknownCredentialError):
+ with pytest.raises(botocore.exceptions.UnknownCredentialError):
provider.source_credentials('SharedConfig')
- with self.assertRaises(botocore.exceptions.UnknownCredentialError):
+ with pytest.raises(botocore.exceptions.UnknownCredentialError):
provider.source_credentials('SharedCredentials')
@@ -1911,10 +1912,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
client_creator, cache={}, profile_name='development')
creds = provider.load()
-
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_assume_role_with_datetime(self):
response = {
@@ -1937,9 +1937,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_assume_role_refresher_serializes_datetime(self):
client = mock.Mock()
@@ -1956,7 +1956,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
}
refresh = create_assume_role_refresher(client, {})
expiry_time = refresh()['expiry_time']
- self.assertEqual(expiry_time, '2016-11-06T01:30:00UTC')
+ assert expiry_time == '2016-11-06T01:30:00UTC'
def test_assume_role_retrieves_from_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
@@ -1982,9 +1982,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo-cached')
- self.assertEqual(creds.secret_key, 'bar-cached')
- self.assertEqual(creds.token, 'baz-cached')
+ assert creds.access_key == 'foo-cached'
+ assert creds.secret_key == 'bar-cached'
+ assert creds.token == 'baz-cached'
def test_chain_prefers_cache(self):
date_in_future = datetime.utcnow() + timedelta(seconds=1000)
@@ -2017,9 +2017,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo-cached')
- self.assertEqual(creds.secret_key, 'bar-cached')
- self.assertEqual(creds.token, 'baz-cached')
+ assert creds.access_key == 'foo-cached'
+ assert creds.secret_key == 'bar-cached'
+ assert creds.token == 'baz-cached'
def test_cache_key_is_windows_safe(self):
response = {
@@ -2045,8 +2045,8 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
cache_key = (
'3f8e35c8dca6211d496e830a2de723b2387921e3'
)
- self.assertIn(cache_key, cache)
- self.assertEqual(cache[cache_key], response)
+ assert cache_key in cache
+ assert cache[cache_key] == response
def test_cache_key_with_role_session_name(self):
response = {
@@ -2074,8 +2074,8 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
cache_key = (
'5e75ce21b6a64ab183b29c4a159b6f0248121d51'
)
- self.assertIn(cache_key, cache)
- self.assertEqual(cache[cache_key], response)
+ assert cache_key in cache
+ assert cache[cache_key] == response
def test_assume_role_in_cache_but_expired(self):
expired_creds = datetime.now(tzlocal())
@@ -2105,9 +2105,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_role_session_name_provided(self):
dev_profile = self.fake_config['profiles']['development']
@@ -2268,11 +2268,11 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
client = client_creator.return_value
assume_role_calls = client.assume_role.call_args_list
- self.assertEqual(len(assume_role_calls), 2, assume_role_calls)
+ assert len(assume_role_calls) == 2, assume_role_calls
# The args should be identical. That is, the second
# assume_role call should have the exact same args as the
# initial assume_role call.
- self.assertEqual(assume_role_calls[0], assume_role_calls[1])
+ assert assume_role_calls[0] == assume_role_calls[1]
def test_assume_role_mfa_cannot_refresh_credentials(self):
# Note: we should look into supporting optional behavior
@@ -2305,7 +2305,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds.get_frozen_credentials()
local_now.return_value = expiration_time
- with self.assertRaises(credentials.RefreshWithMFAUnsupportedError):
+ with pytest.raises(credentials.RefreshWithMFAUnsupportedError):
# access_key is a property that will refresh credentials
# if they're expired. Because we set the expiry time to
# something in the past, this will trigger the refresh
@@ -2325,7 +2325,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
# is a noop and will not return credentials (which means we
# move on to the next provider).
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def test_source_profile_not_provided(self):
del self.fake_config['profiles']['development']['source_profile']
@@ -2334,7 +2334,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
mock.Mock(), cache={}, profile_name='development')
# source_profile is required, we shoudl get an error.
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
def test_source_profile_does_not_exist(self):
@@ -2345,7 +2345,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
mock.Mock(), cache={}, profile_name='development')
# source_profile is required, we shoudl get an error.
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
def test_incomplete_source_credentials_raises_error(self):
@@ -2354,7 +2354,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
provider.load()
def test_source_profile_and_credential_source_provided(self):
@@ -2364,7 +2364,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='development')
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
def test_credential_source_with_no_resolver_configured(self):
@@ -2372,7 +2372,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
self.create_config_loader(),
mock.Mock(), cache={}, profile_name='non-static')
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
def test_credential_source_with_no_providers_configured(self):
@@ -2382,7 +2382,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
credential_sourcer=credentials.CanonicalNameCredentialSourcer([])
)
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
def test_credential_source_not_among_providers(self):
@@ -2401,7 +2401,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
# calls for the Environment credential provider as the credentials
# source. Since that isn't one of the configured source providers,
# an error is thrown.
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
provider.load()
def test_assume_role_with_credential_source(self):
@@ -2439,9 +2439,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
)
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
client_creator.assert_called_with(
'sts', aws_access_key_id=fake_creds.access_key,
aws_secret_access_key=fake_creds.secret_key,
@@ -2469,7 +2469,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
[fake_provider])
)
- with self.assertRaises(botocore.exceptions.CredentialRetrievalError):
+ with pytest.raises(botocore.exceptions.CredentialRetrievalError):
provider.load()
def test_source_profile_can_reference_self(self):
@@ -2500,9 +2500,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
)
creds = provider.load()
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
def test_infinite_looping_profiles_raises_error(self):
config = {
@@ -2523,7 +2523,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
mock.Mock(), cache={}, profile_name='first'
)
- with self.assertRaises(botocore.credentials.InfiniteLoopConfigError):
+ with pytest.raises(botocore.credentials.InfiniteLoopConfigError):
provider.load()
def test_recursive_assume_role(self):
@@ -2568,9 +2568,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load()
expected_creds = assume_responses[-1]
- self.assertEqual(creds.access_key, expected_creds.access_key)
- self.assertEqual(creds.secret_key, expected_creds.secret_key)
- self.assertEqual(creds.token, expected_creds.token)
+ assert creds.access_key == expected_creds.access_key
+ assert creds.secret_key == expected_creds.secret_key
+ assert creds.token == expected_creds.token
client_creator.assert_has_calls([
mock.call(
@@ -2607,7 +2607,7 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
creds = provider.load().get_frozen_credentials()
- self.assertEqual(client_creator.call_count, 1)
+ assert client_creator.call_count == 1
client_creator.assert_called_with(
'sts',
aws_access_key_id='foo-profile-access-key',
@@ -2615,9 +2615,9 @@ class TestAssumeRoleCredentialProvider(unittest.TestCase):
aws_session_token='foo-profile-token',
)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
class ProfileProvider(object):
@@ -2646,40 +2646,40 @@ class TestJSONCache(unittest.TestCase):
def test_supports_contains_check(self):
# By default the cache is empty because we're
# using a new temp dir everytime.
- self.assertTrue('mykey' not in self.cache)
+ assert 'mykey' not in self.cache
def test_add_key_and_contains_check(self):
self.cache['mykey'] = {'foo': 'bar'}
- self.assertTrue('mykey' in self.cache)
+ assert 'mykey' in self.cache
def test_added_key_can_be_retrieved(self):
self.cache['mykey'] = {'foo': 'bar'}
- self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
+ assert self.cache['mykey'] == {'foo': 'bar'}
def test_only_accepts_json_serializable_data(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
# set()'s cannot be serialized to a JSON string.
self.cache['mykey'] = set()
def test_can_override_existing_values(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey'] = {'baz': 'newvalue'}
- self.assertEqual(self.cache['mykey'], {'baz': 'newvalue'})
+ assert self.cache['mykey'] == {'baz': 'newvalue'}
def test_can_add_multiple_keys(self):
self.cache['mykey'] = {'foo': 'bar'}
self.cache['mykey2'] = {'baz': 'qux'}
- self.assertEqual(self.cache['mykey'], {'foo': 'bar'})
- self.assertEqual(self.cache['mykey2'], {'baz': 'qux'})
+ assert self.cache['mykey'] == {'foo': 'bar'}
+ assert self.cache['mykey2'] == {'baz': 'qux'}
def test_working_dir_does_not_exist(self):
working_dir = os.path.join(self.tempdir, 'foo')
cache = credentials.JSONFileCache(working_dir)
cache['foo'] = {'bar': 'baz'}
- self.assertEqual(cache['foo'], {'bar': 'baz'})
+ assert cache['foo'] == {'bar': 'baz'}
def test_key_error_raised_when_cache_key_does_not_exist(self):
- with self.assertRaises(KeyError):
+ with pytest.raises(KeyError):
self.cache['foo']
def test_file_is_truncated_before_writing(self):
@@ -2687,13 +2687,13 @@ class TestJSONCache(unittest.TestCase):
'really long key in the cache': 'really long value in cache'}
# Now overwrite it with a smaller value.
self.cache['mykey'] = {'a': 'b'}
- self.assertEqual(self.cache['mykey'], {'a': 'b'})
+ assert self.cache['mykey'] == {'a': 'b'}
@skip_if_windows('File permissions tests not supported on Windows.')
def test_permissions_for_file_restricted(self):
self.cache['mykey'] = {'foo': 'bar'}
filename = os.path.join(self.tempdir, 'mykey.json')
- self.assertEqual(os.stat(filename).st_mode & 0xFFF, 0o600)
+ assert os.stat(filename).st_mode & 0xFFF == 0o600
class TestRefreshLogic(unittest.TestCase):
@@ -2705,8 +2705,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=3,
advisory_refresh=3)
temp = creds.get_frozen_credentials()
- self.assertEqual(
- temp, credentials.ReadOnlyCredentials('1', '1', '1'))
+ assert temp == credentials.ReadOnlyCredentials('1', '1', '1')
def test_advisory_refresh_needed(self):
creds = IntegerRefresher(
@@ -2716,8 +2715,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=2,
advisory_refresh=5)
temp = creds.get_frozen_credentials()
- self.assertEqual(
- temp, credentials.ReadOnlyCredentials('1', '1', '1'))
+ assert temp == credentials.ReadOnlyCredentials('1', '1', '1')
def test_refresh_fails_is_not_an_error_during_advisory_period(self):
fail_refresh = mock.Mock(side_effect=Exception("refresh failed"))
@@ -2729,13 +2727,12 @@ class TestRefreshLogic(unittest.TestCase):
)
temp = creds.get_frozen_credentials()
# We should have called the refresh function.
- self.assertTrue(fail_refresh.called)
+ assert fail_refresh.called
# The fail_refresh function will raise an exception.
# Because we're in the advisory period we'll not propogate
# the exception and return the current set of credentials
# (generation '1').
- self.assertEqual(
- temp, credentials.ReadOnlyCredentials('0', '0', '0'))
+ assert temp == credentials.ReadOnlyCredentials('0', '0', '0')
def test_exception_propogated_on_error_during_mandatory_period(self):
fail_refresh = mock.Mock(side_effect=Exception("refresh failed"))
@@ -2746,7 +2743,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=7,
refresh_function=fail_refresh
)
- with six.assertRaisesRegex(self, Exception, 'refresh failed'):
+ with pytest.raises(Exception, match=r'refresh failed'):
creds.get_frozen_credentials()
def test_exception_propogated_on_expired_credentials(self):
@@ -2759,7 +2756,7 @@ class TestRefreshLogic(unittest.TestCase):
mandatory_refresh=7,
refresh_function=fail_refresh
)
- with six.assertRaisesRegex(self, Exception, 'refresh failed'):
+ with pytest.raises(Exception, match=r'refresh failed'):
# Because credentials are actually expired, any
# failure to refresh should be propagated.
creds.get_frozen_credentials()
@@ -2780,7 +2777,7 @@ class TestRefreshLogic(unittest.TestCase):
creds_last_for=-2,
)
err_msg = 'refreshed credentials are still expired'
- with six.assertRaisesRegex(self, RuntimeError, err_msg):
+ with pytest.raises(RuntimeError, match=err_msg):
# Because credentials are actually expired, any
# failure to refresh should be propagated.
creds.get_frozen_credentials()
@@ -2793,7 +2790,7 @@ class TestContainerProvider(BaseEnvVar):
environ = {}
provider = credentials.ContainerProvider(environ)
creds = provider.load()
- self.assertIsNone(creds)
+ assert creds is None
def full_url(self, url):
return 'http://%s%s' % (ContainerMetadataFetcher.IP_ADDRESS, url)
@@ -2821,10 +2818,10 @@ class TestContainerProvider(BaseEnvVar):
fetcher.retrieve_full_uri.assert_called_with(
self.full_url('/latest/credentials?id=foo'), headers=None)
- self.assertEqual(creds.access_key, 'access_key')
- self.assertEqual(creds.secret_key, 'secret_key')
- self.assertEqual(creds.token, 'token')
- self.assertEqual(creds.method, 'container-role')
+ assert creds.access_key == 'access_key'
+ assert creds.secret_key == 'secret_key'
+ assert creds.token == 'token'
+ assert creds.method == 'container-role'
def test_creds_refresh_when_needed(self):
environ = {
@@ -2851,9 +2848,9 @@ class TestContainerProvider(BaseEnvVar):
provider = credentials.ContainerProvider(environ, fetcher)
creds = provider.load()
frozen_creds = creds.get_frozen_credentials()
- self.assertEqual(frozen_creds.access_key, 'access_key_new')
- self.assertEqual(frozen_creds.secret_key, 'secret_key_new')
- self.assertEqual(frozen_creds.token, 'token_new')
+ assert frozen_creds.access_key == 'access_key_new'
+ assert frozen_creds.secret_key == 'secret_key_new'
+ assert frozen_creds.token == 'token_new'
def test_http_error_propagated(self):
environ = {
@@ -2866,7 +2863,7 @@ class TestContainerProvider(BaseEnvVar):
exception = botocore.exceptions.CredentialRetrievalError
fetcher.retrieve_full_uri.side_effect = exception(provider='ecs-role',
error_msg='fake http error')
- with self.assertRaises(exception):
+ with pytest.raises(exception):
provider = credentials.ContainerProvider(environ, fetcher)
creds = provider.load()
@@ -2894,7 +2891,7 @@ class TestContainerProvider(BaseEnvVar):
# First time works with no issues.
creds = provider.load()
# Second time with a refresh should propagate an error.
- with self.assertRaises(raised_exception):
+ with pytest.raises(raised_exception):
frozen_creds = creds.get_frozen_credentials()
def test_can_use_full_url(self):
@@ -2915,10 +2912,10 @@ class TestContainerProvider(BaseEnvVar):
fetcher.retrieve_full_uri.assert_called_with('http://localhost/foo',
headers=None)
- self.assertEqual(creds.access_key, 'access_key')
- self.assertEqual(creds.secret_key, 'secret_key')
- self.assertEqual(creds.token, 'token')
- self.assertEqual(creds.method, 'container-role')
+ assert creds.access_key == 'access_key'
+ assert creds.secret_key == 'secret_key'
+ assert creds.token == 'token'
+ assert creds.method == 'container-role'
def test_can_pass_basic_auth_token(self):
environ = {
@@ -2939,10 +2936,10 @@ class TestContainerProvider(BaseEnvVar):
fetcher.retrieve_full_uri.assert_called_with(
'http://localhost/foo', headers={'Authorization': 'Basic auth-token'})
- self.assertEqual(creds.access_key, 'access_key')
- self.assertEqual(creds.secret_key, 'secret_key')
- self.assertEqual(creds.token, 'token')
- self.assertEqual(creds.method, 'container-role')
+ assert creds.access_key == 'access_key'
+ assert creds.secret_key == 'secret_key'
+ assert creds.token == 'token'
+ assert creds.method == 'container-role'
class TestProcessProvider(BaseEnvVar):
@@ -2971,13 +2968,13 @@ class TestProcessProvider(BaseEnvVar):
# self.loaded_config is an empty dictionary with no profile
# information.
provider = self.create_process_provider()
- self.assertIsNone(provider.load())
+ assert provider.load() is None
def test_process_not_invoked_if_not_configured_for_empty_config(self):
# No credential_process configured so we skip this provider.
self.loaded_config['profiles'] = {'default': {}}
provider = self.create_process_provider()
- self.assertIsNone(provider.load())
+ assert provider.load() is None
def test_can_retrieve_via_process(self):
self.loaded_config['profiles'] = {
@@ -2993,11 +2990,11 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
- self.assertEqual(creds.method, 'custom-process')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
+ assert creds.method == 'custom-process'
self.popen_mock.assert_called_with(
['my-process'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
@@ -3019,7 +3016,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
- self.assertIsNotNone(creds)
+ assert creds is not None
self.popen_mock.assert_called_with(
['my-process', '--foo', '--bar', 'one two'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE
@@ -3054,11 +3051,11 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo2')
- self.assertEqual(creds.secret_key, 'bar2')
- self.assertEqual(creds.token, 'baz2')
- self.assertEqual(creds.method, 'custom-process')
+ assert creds is not None
+ assert creds.access_key == 'foo2'
+ assert creds.secret_key == 'bar2'
+ assert creds.token == 'baz2'
+ assert creds.method == 'custom-process'
def test_non_zero_rc_raises_exception(self):
self.loaded_config['profiles'] = {
@@ -3068,7 +3065,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with six.assertRaisesRegex(self, exception, 'Error Message'):
+ with pytest.raises(exception, match='Error Message'):
provider.load()
def test_unsupported_version_raises_mismatch(self):
@@ -3086,7 +3083,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with six.assertRaisesRegex(self, exception, 'Unsupported version'):
+ with pytest.raises(exception, match='Unsupported version'):
provider.load()
def test_missing_version_in_payload_returned_raises_exception(self):
@@ -3103,7 +3100,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with six.assertRaisesRegex(self, exception, 'Unsupported version'):
+ with pytest.raises(exception, match='Unsupported version'):
provider.load()
def test_missing_access_key_raises_exception(self):
@@ -3120,7 +3117,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with six.assertRaisesRegex(self, exception, 'Missing required key'):
+ with pytest.raises(exception, match='Missing required key'):
provider.load()
def test_missing_secret_key_raises_exception(self):
@@ -3137,7 +3134,7 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
exception = botocore.exceptions.CredentialRetrievalError
- with six.assertRaisesRegex(self, exception, 'Missing required key'):
+ with pytest.raises(exception, match='Missing required key'):
provider.load()
def test_missing_session_token(self):
@@ -3154,11 +3151,11 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'custom-process')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token is None
+ assert creds.method == 'custom-process'
def test_missing_expiration(self):
self.loaded_config['profiles'] = {
@@ -3175,10 +3172,11 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertEqual(creds.token, 'baz')
- self.assertEqual(creds.method, 'custom-process')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token == 'baz'
+ assert creds.method == 'custom-process'
def test_missing_expiration_and_session_token(self):
self.loaded_config['profiles'] = {
@@ -3193,11 +3191,11 @@ class TestProcessProvider(BaseEnvVar):
provider = self.create_process_provider()
creds = provider.load()
- self.assertIsNotNone(creds)
- self.assertEqual(creds.access_key, 'foo')
- self.assertEqual(creds.secret_key, 'bar')
- self.assertIsNone(creds.token)
- self.assertEqual(creds.method, 'custom-process')
+ assert creds is not None
+ assert creds.access_key == 'foo'
+ assert creds.secret_key == 'bar'
+ assert creds.token is None
+ assert creds.method == 'custom-process'
class TestProfileProviderBuilder(unittest.TestCase):
@@ -3215,10 +3213,10 @@ class TestProfileProviderBuilder(unittest.TestCase):
ProcessProvider,
ConfigProvider,
]
- self.assertEqual(len(providers), len(expected_providers))
+ assert len(providers) == len(expected_providers)
zipped_providers = six.moves.zip(providers, expected_providers)
for provider, expected_type in zipped_providers:
- self.assertTrue(isinstance(provider, expected_type))
+ assert isinstance(provider, expected_type)
class TestSSOCredentialFetcher(unittest.TestCase):
@@ -3268,10 +3266,10 @@ class TestSSOCredentialFetcher(unittest.TestCase):
)
with self.stubber:
credentials = self.fetcher.fetch_credentials()
- self.assertEqual(credentials['access_key'], 'foo')
- self.assertEqual(credentials['secret_key'], 'bar')
- self.assertEqual(credentials['token'], 'baz')
- self.assertEqual(credentials['expiry_time'], '2008-09-23T12:43:20Z')
+ assert credentials['access_key'] == 'foo'
+ assert credentials['secret_key'] == 'bar'
+ assert credentials['token'] == 'baz'
+ assert credentials['expiry_time'] == '2008-09-23T12:43:20Z'
cache_key = '048db75bbe50955c16af7aba6ff9c41a3131bb7e'
expected_cached_credentials = {
'ProviderType': 'sso',
@@ -3282,7 +3280,7 @@ class TestSSOCredentialFetcher(unittest.TestCase):
'Expiration': '2008-09-23T12:43:20Z',
}
}
- self.assertEqual(self.cache[cache_key], expected_cached_credentials)
+ assert self.cache[cache_key] == expected_cached_credentials
def test_raises_helpful_message_on_unauthorized_exception(self):
expected_params = {
@@ -3295,7 +3293,7 @@ class TestSSOCredentialFetcher(unittest.TestCase):
service_error_code='UnauthorizedException',
expected_params=expected_params,
)
- with self.assertRaises(botocore.exceptions.UnauthorizedSSOTokenError):
+ with pytest.raises(botocore.exceptions.UnauthorizedSSOTokenError):
with self.stubber:
credentials = self.fetcher.fetch_credentials()
@@ -3370,9 +3368,9 @@ class TestSSOProvider(unittest.TestCase):
self._add_get_role_credentials_response()
with self.stubber:
credentials = self.provider.load()
- self.assertEqual(credentials.access_key, 'foo')
- self.assertEqual(credentials.secret_key, 'bar')
- self.assertEqual(credentials.token, 'baz')
+ assert credentials.access_key == 'foo'
+ assert credentials.secret_key == 'bar'
+ assert credentials.token == 'baz'
def test_load_sso_credentials_with_cache(self):
cached_creds = {
@@ -3385,9 +3383,9 @@ class TestSSOProvider(unittest.TestCase):
}
self.cache[self.cached_creds_key] = cached_creds
credentials = self.provider.load()
- self.assertEqual(credentials.access_key, 'cached-akid')
- self.assertEqual(credentials.secret_key, 'cached-sak')
- self.assertEqual(credentials.token, 'cached-st')
+ assert credentials.access_key == 'cached-akid'
+ assert credentials.secret_key == 'cached-sak'
+ assert credentials.token == 'cached-st'
def test_load_sso_credentials_with_cache_expired(self):
cached_creds = {
@@ -3403,12 +3401,12 @@ class TestSSOProvider(unittest.TestCase):
self._add_get_role_credentials_response()
with self.stubber:
credentials = self.provider.load()
- self.assertEqual(credentials.access_key, 'foo')
- self.assertEqual(credentials.secret_key, 'bar')
- self.assertEqual(credentials.token, 'baz')
+ assert credentials.access_key == 'foo'
+ assert credentials.secret_key == 'bar'
+ assert credentials.token == 'baz'
def test_required_config_not_set(self):
del self.config['sso_start_url']
# If any required configuration is missing we should get an error
- with self.assertRaises(botocore.exceptions.InvalidConfigError):
+ with pytest.raises(botocore.exceptions.InvalidConfigError):
self.provider.load()
diff --git a/tests/unit/test_discovery.py b/tests/unit/test_discovery.py
index 719b99be..b20717d5 100644
--- a/tests/unit/test_discovery.py
+++ b/tests/unit/test_discovery.py
@@ -1,4 +1,5 @@
import time
+import pytest
from tests import mock
from tests import unittest
@@ -168,14 +169,13 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
model = self.service_model.operation_model('DescribeEndpoints')
params = {'headers': {}}
inject_api_version_header_if_needed(model, params)
- self.assertEqual(params['headers'].get('x-amz-api-version'),
- '2018-08-31')
+ assert params['headers'].get('x-amz-api-version') == '2018-08-31'
def test_no_inject_api_version_if_not_endpoint_operation(self):
model = self.service_model.operation_model('TestDiscoveryRequired')
params = {'headers': {}}
inject_api_version_header_if_needed(model, params)
- self.assertNotIn('x-amz-api-version', params['headers'])
+ assert 'x-amz-api-version' not in params['headers']
def test_gather_identifiers(self):
params = {
@@ -184,12 +184,12 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
}
operation = self.service_model.operation_model('TestDiscoveryRequired')
ids = self.manager.gather_identifiers(operation, params)
- self.assertEqual(ids, {'Foo': 'value1', 'Bar': 'value2'})
+ assert ids == {'Foo': 'value1', 'Bar': 'value2'}
def test_gather_identifiers_none(self):
operation = self.service_model.operation_model('TestDiscovery')
ids = self.manager.gather_identifiers(operation, {})
- self.assertEqual(ids, {})
+ assert ids == {}
def test_describe_endpoint(self):
kwargs = {
@@ -224,11 +224,11 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
self.manager.describe_endpoint(**kwargs)
self.client.describe_endpoints.assert_called_with(**kwargs)
key = ((('Bar', 'value2'), ('Foo', 'value1')), 'TestDiscoveryRequired')
- self.assertIn(key, cache)
- self.assertEqual(cache[key][0]['Address'], 'new.com')
+ assert key in cache
+ assert cache[key][0]['Address'] == 'new.com'
self.manager.describe_endpoint(**kwargs)
call_count = self.client.describe_endpoints.call_count
- self.assertEqual(call_count, 1)
+ assert call_count == 1
def test_describe_endpoint_no_ids_or_operation(self):
cache = {}
@@ -240,13 +240,13 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
)
self.client.describe_endpoints.assert_called_with()
key = ()
- self.assertIn(key, cache)
- self.assertEqual(cache[key][0]['Address'], 'new.com')
+ assert key in cache
+ assert cache[key][0]['Address'] == 'new.com'
self.manager.describe_endpoint(
Operation='TestDiscoveryRequired', Identifiers={}
)
call_count = self.client.describe_endpoints.call_count
- self.assertEqual(call_count, 1)
+ assert call_count == 1
def test_describe_endpoint_expired_entry(self):
current_time = time.time()
@@ -261,11 +261,11 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
}
self.manager.describe_endpoint(**kwargs)
self.client.describe_endpoints.assert_called_with()
- self.assertIn(key, cache)
- self.assertEqual(cache[key][0]['Address'], 'new.com')
+ assert key in cache
+ assert cache[key][0]['Address'] == 'new.com'
self.manager.describe_endpoint(**kwargs)
call_count = self.client.describe_endpoints.call_count
- self.assertEqual(call_count, 1)
+ assert call_count == 1
def test_describe_endpoint_cache_expiration(self):
def _time():
@@ -276,8 +276,8 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
Operation='TestDiscoveryRequired', Identifiers={}
)
key = ()
- self.assertIn(key, cache)
- self.assertEqual(cache[key][0]['Expiration'], float(120))
+ assert key in cache
+ assert cache[key][0]['Expiration'] == float(120)
def test_delete_endpoints_present(self):
key = ()
@@ -290,7 +290,7 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
'Operation': 'TestDiscoveryRequired',
}
self.manager.delete_endpoints(**kwargs)
- self.assertEqual(cache, {})
+ assert cache == {}
def test_delete_endpoints_absent(self):
cache = {}
@@ -300,17 +300,17 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
'Operation': 'TestDiscoveryRequired',
}
self.manager.delete_endpoints(**kwargs)
- self.assertEqual(cache, {})
+ assert cache == {}
def test_describe_endpoint_optional_fails_no_cache(self):
side_effect = [ConnectionError(error=None)]
self.construct_manager(side_effect=side_effect)
kwargs = {'Operation': 'TestDiscoveryOptional'}
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertIsNone(endpoint)
+ assert endpoint is None
# This second call should be blocked as we just failed
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertIsNone(endpoint)
+ assert endpoint is None
self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_optional_fails_stale_cache(self):
@@ -322,23 +322,23 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
self.construct_manager(cache=cache, side_effect=side_effect)
kwargs = {'Operation': 'TestDiscoveryOptional'}
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'old.com')
+ assert endpoint == 'old.com'
# This second call shouldn't go through as we just failed
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'old.com')
+ assert endpoint == 'old.com'
self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_required_fails_no_cache(self):
side_effect = [ConnectionError(error=None)] * 2
self.construct_manager(side_effect=side_effect)
kwargs = {'Operation': 'TestDiscoveryRequired'}
- with self.assertRaises(EndpointDiscoveryRefreshFailed):
+ with pytest.raises(EndpointDiscoveryRefreshFailed):
self.manager.describe_endpoint(**kwargs)
# This second call should go through, as we have no cache
- with self.assertRaises(EndpointDiscoveryRefreshFailed):
+ with pytest.raises(EndpointDiscoveryRefreshFailed):
self.manager.describe_endpoint(**kwargs)
describe_count = self.client.describe_endpoints.call_count
- self.assertEqual(describe_count, 2)
+ assert describe_count == 2
def test_describe_endpoint_required_fails_stale_cache(self):
key = ()
@@ -349,10 +349,10 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
self.construct_manager(cache=cache, side_effect=side_effect)
kwargs = {'Operation': 'TestDiscoveryRequired'}
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'old.com')
+ assert endpoint == 'old.com'
# We have a stale endpoint, so this shouldn't fail or force a refresh
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'old.com')
+ assert endpoint == 'old.com'
self.client.describe_endpoints.call_args_list == [mock.call()]
def test_describe_endpoint_required_force_refresh_success(self):
@@ -366,12 +366,12 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
self.construct_manager(side_effect=side_effect)
kwargs = {'Operation': 'TestDiscoveryRequired'}
# First call will fail
- with self.assertRaises(EndpointDiscoveryRefreshFailed):
+ with pytest.raises(EndpointDiscoveryRefreshFailed):
self.manager.describe_endpoint(**kwargs)
self.client.describe_endpoints.call_args_list == [mock.call()]
# Force a refresh if the cache is empty but discovery is required
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'new.com')
+ assert endpoint == 'new.com'
def test_describe_endpoint_retries_after_failing(self):
fake_time = mock.Mock()
@@ -386,11 +386,11 @@ class TestEndpointDiscoveryManager(BaseEndpointDiscoveryTest):
self.construct_manager(side_effect=side_effect, time=fake_time)
kwargs = {'Operation': 'TestDiscoveryOptional'}
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertIsNone(endpoint)
+ assert endpoint is None
self.client.describe_endpoints.call_args_list == [mock.call()]
# Second time should try again as enough time has elapsed
endpoint = self.manager.describe_endpoint(**kwargs)
- self.assertEqual(endpoint, 'new.com')
+ assert endpoint == 'new.com'
class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
@@ -420,7 +420,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
}
self.manager.describe_endpoint.return_value = 'https://new.foo'
self.handler.discover_endpoint(request, 'TestOperation')
- self.assertEqual(request.url, 'https://new.foo')
+ assert request.url == 'https://new.foo'
self.manager.describe_endpoint.assert_called_with(
Operation='TestOperation', Identifiers={}
)
@@ -433,7 +433,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
request.url = 'old.com'
self.manager.describe_endpoint.return_value = None
self.handler.discover_endpoint(request, 'TestOperation')
- self.assertEqual(request.url, 'old.com')
+ assert request.url == 'old.com'
self.manager.describe_endpoint.assert_called_with(
Operation='TestOperation', Identifiers={}
)
@@ -445,7 +445,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
}
self.manager.describe_endpoint.return_value = 'new.foo'
self.handler.discover_endpoint(request, 'TestOperation')
- self.assertEqual(request.url, 'https://new.foo')
+ assert request.url == 'https://new.foo'
self.manager.describe_endpoint.assert_called_with(
Operation='TestOperation', Identifiers={}
)
@@ -453,7 +453,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
def test_inject_no_context(self):
request = AWSRequest(url='https://original.foo')
self.handler.discover_endpoint(request, 'TestOperation')
- self.assertEqual(request.url, 'https://original.foo')
+ assert request.url == 'https://original.foo'
self.manager.describe_endpoint.assert_not_called()
def test_gather_identifiers(self):
@@ -469,28 +469,28 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
model = self.service_model.operation_model('TestDiscoveryRequired')
self.manager.gather_identifiers.return_value = ids
self.handler.gather_identifiers(params, model, context)
- self.assertEqual(context['discovery']['identifiers'], ids)
+ assert context['discovery']['identifiers'] == ids
def test_gather_identifiers_not_discoverable(self):
context = {}
model = self.service_model.operation_model('DescribeEndpoints')
self.handler.gather_identifiers({}, model, context)
- self.assertEqual(context, {})
+ assert context == {}
def test_discovery_disabled_but_required(self):
model = self.service_model.operation_model('TestDiscoveryRequired')
- with self.assertRaises(EndpointDiscoveryRequired):
+ with pytest.raises(EndpointDiscoveryRequired):
block_endpoint_discovery_required_operations(model)
def test_discovery_disabled_but_optional(self):
context = {}
model = self.service_model.operation_model('TestDiscoveryOptional')
block_endpoint_discovery_required_operations(model, context=context)
- self.assertEqual(context, {})
+ assert context == {}
def test_does_not_retry_no_response(self):
retry = self.handler.handle_retries(None, None, None)
- self.assertIsNone(retry)
+ assert retry is None
def test_does_not_retry_other_errors(self):
parsed_response = {
@@ -498,7 +498,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
}
response = (None, parsed_response)
retry = self.handler.handle_retries(None, response, None)
- self.assertIsNone(retry)
+ assert retry is None
def test_does_not_retry_if_no_context(self):
request_dict = {'context': {}}
@@ -507,7 +507,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
}
response = (None, parsed_response)
retry = self.handler.handle_retries(request_dict, response, None)
- self.assertIsNone(retry)
+ assert retry is None
def _assert_retries(self, parsed_response):
request_dict = {
@@ -518,7 +518,7 @@ class TestEndpointDiscoveryHandler(BaseEndpointDiscoveryTest):
response = (None, parsed_response)
model = self.service_model.operation_model('TestDiscoveryOptional')
retry = self.handler.handle_retries(request_dict, response, model)
- self.assertEqual(retry, 0)
+ assert retry == 0
self.manager.delete_endpoints.assert_called_with(
Operation='TestDiscoveryOptional', Identifiers={}
)
diff --git a/tests/unit/test_endpoint.py b/tests/unit/test_endpoint.py
index b9a97df5..81ec3dde 100644
--- a/tests/unit/test_endpoint.py
+++ b/tests/unit/test_endpoint.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import socket
+import pytest
from tests import unittest
from tests import mock
@@ -102,17 +103,17 @@ class TestEndpointFeatures(TestEndpointBase):
self.endpoint.make_request(self.op, request_dict())
# http_session should be used to send the request.
- self.assertTrue(self.http_session.send.called)
+ assert self.http_session.send.called
prepared_request = self.http_session.send.call_args[0][0]
- self.assertNotIn('Authorization', prepared_request.headers)
+ assert 'Authorization' not in prepared_request.headers
def test_make_request_no_signature_version(self):
self.endpoint.make_request(self.op, request_dict())
# http_session should be used to send the request.
- self.assertTrue(self.http_session.send.called)
+ assert self.http_session.send.called
prepared_request = self.http_session.send.call_args[0][0]
- self.assertNotIn('Authorization', prepared_request.headers)
+ assert 'Authorization' not in prepared_request.headers
def test_make_request_with_context(self):
r = request_dict()
@@ -120,7 +121,7 @@ class TestEndpointFeatures(TestEndpointBase):
with mock.patch('botocore.endpoint.Endpoint.prepare_request') as prepare:
self.endpoint.make_request(self.op, r)
request = prepare.call_args[0][0]
- self.assertEqual(request.context['signing']['region'], 'us-west-2')
+ assert request.context['signing']['region'] == 'us-west-2'
def test_parses_modeled_exception_fields(self):
# Setup the service model to have exceptions to generate the mapping
@@ -148,10 +149,10 @@ class TestEndpointFeatures(TestEndpointBase):
_, response = self.endpoint.make_request(self.op, r)
# The parser should be called twice, once for the original
# error parse and once again for the modeled exception parse
- self.assertEqual(parser.parse.call_count, 2)
+ assert parser.parse.call_count == 2
parse_calls = parser.parse.call_args_list
- self.assertEqual(parse_calls[1][0][1], self.exception_shape)
- self.assertEqual(parse_calls[0][0][1], self.op.output_shape)
+ assert parse_calls[1][0][1] == self.exception_shape
+ assert parse_calls[0][0][1] == self.op.output_shape
expected_response = {
'Error': {
'Code': 'ExceptionShape',
@@ -159,7 +160,7 @@ class TestEndpointFeatures(TestEndpointBase):
},
'SomeField': 'Foo',
}
- self.assertEqual(response, expected_response)
+ assert response == expected_response
class TestRetryInterface(TestEndpointBase):
@@ -174,14 +175,12 @@ class TestRetryInterface(TestEndpointBase):
self._operation.has_event_stream_output = False
def assert_events_emitted(self, event_emitter, expected_events):
- self.assertEqual(
- self.get_events_emitted(event_emitter), expected_events)
+ assert self.get_events_emitted(event_emitter) == expected_events
def test_retry_events_are_emitted(self):
self.endpoint.make_request(self._operation, request_dict())
call_args = self.event_emitter.emit.call_args
- self.assertEqual(call_args[0][0],
- 'needs-retry.ec2.DescribeInstances')
+ assert call_args[0][0] == 'needs-retry.ec2.DescribeInstances'
def test_retry_events_can_alter_behavior(self):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
@@ -201,7 +200,7 @@ class TestRetryInterface(TestEndpointBase):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
num_retries=1)
self.http_session.send.side_effect = HTTPClientError(error='wrapped')
- with self.assertRaises(HTTPClientError):
+ with pytest.raises(HTTPClientError):
self.endpoint.make_request(self._operation, request_dict())
self.assert_events_emitted(
self.event_emitter,
@@ -220,7 +219,7 @@ class TestRetryInterface(TestEndpointBase):
parser.parse.return_value = {'ResponseMetadata': {}}
self.factory.return_value.create_parser.return_value = parser
response = self.endpoint.make_request(self._operation, request_dict())
- self.assertEqual(response[1]['ResponseMetadata']['RetryAttempts'], 1)
+ assert response[1]['ResponseMetadata']['RetryAttempts'] == 1
def test_retry_attempts_is_zero_when_not_retried(self):
self.event_emitter.emit.side_effect = self.get_emitter_responses(
@@ -229,7 +228,7 @@ class TestRetryInterface(TestEndpointBase):
parser.parse.return_value = {'ResponseMetadata': {}}
self.factory.return_value.create_parser.return_value = parser
response = self.endpoint.make_request(self._operation, request_dict())
- self.assertEqual(response[1]['ResponseMetadata']['RetryAttempts'], 0)
+ assert response[1]['ResponseMetadata']['RetryAttempts'] == 0
class TestS3ResetStreamOnRetry(TestEndpointBase):
@@ -260,7 +259,7 @@ class TestS3ResetStreamOnRetry(TestEndpointBase):
)
self.endpoint.make_request(op, request)
# 2 seeks for the resets and 6 (2 per creation) for content-length
- self.assertEqual(body.total_resets, 8)
+ assert body.total_resets == 8
class TestEventStreamBody(TestEndpointBase):
@@ -270,7 +269,7 @@ class TestEventStreamBody(TestEndpointBase):
request = request_dict()
self.endpoint.make_request(self.op, request)
sent_request = self.http_session.send.call_args[0][0]
- self.assertTrue(sent_request.stream_output)
+ assert sent_request.stream_output
class TestEndpointCreator(unittest.TestCase):
@@ -291,7 +290,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint = self.creator.create_endpoint(
self.service_model, region_name='us-east-1',
endpoint_url='https://endpoint.url')
- self.assertEqual(endpoint.host, 'https://endpoint.url')
+ assert endpoint.host == 'https://endpoint.url'
def test_create_endpoint_with_default_timeout(self):
endpoint = self.creator.create_endpoint(
@@ -299,7 +298,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com',
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('timeout'), DEFAULT_TIMEOUT)
+ assert session_args.get('timeout') == DEFAULT_TIMEOUT
def test_create_endpoint_with_customized_timeout(self):
endpoint = self.creator.create_endpoint(
@@ -307,7 +306,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com', timeout=123,
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('timeout'), 123)
+ assert session_args.get('timeout') == 123
def test_get_endpoint_default_verify_ssl(self):
endpoint = self.creator.create_endpoint(
@@ -315,7 +314,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com',
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertTrue(session_args.get('verify'))
+ assert session_args.get('verify')
def test_verify_ssl_can_be_disabled(self):
endpoint = self.creator.create_endpoint(
@@ -323,7 +322,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com', verify=False,
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertFalse(session_args.get('verify'))
+ assert not session_args.get('verify')
def test_verify_ssl_can_specify_cert_bundle(self):
endpoint = self.creator.create_endpoint(
@@ -331,7 +330,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com', verify='/path/cacerts.pem',
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('verify'), '/path/cacerts.pem')
+ assert session_args.get('verify') == '/path/cacerts.pem'
def test_client_cert_can_specify_path(self):
client_cert = '/some/path/cert'
@@ -340,7 +339,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com', client_cert=client_cert,
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('client_cert'), '/some/path/cert')
+ assert session_args.get('client_cert') == '/some/path/cert'
def test_honor_cert_bundle_env_var(self):
self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem'
@@ -349,7 +348,7 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com',
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('verify'), '/env/cacerts.pem')
+ assert session_args.get('verify') == '/env/cacerts.pem'
def test_env_ignored_if_explicitly_passed(self):
self.environ['REQUESTS_CA_BUNDLE'] = '/env/cacerts.pem'
@@ -359,7 +358,7 @@ class TestEndpointCreator(unittest.TestCase):
http_session_cls=self.mock_session)
session_args = self.mock_session.call_args[1]
# /path/cacerts.pem wins over the value from the env var.
- self.assertEqual(session_args.get('verify'), '/path/cacerts.pem')
+ assert session_args.get('verify') == '/path/cacerts.pem'
def test_can_specify_max_pool_conns(self):
endpoint = self.creator.create_endpoint(
@@ -369,7 +368,7 @@ class TestEndpointCreator(unittest.TestCase):
http_session_cls=self.mock_session,
)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('max_pool_connections'), 100)
+ assert session_args.get('max_pool_connections') == 100
def test_socket_options(self):
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
@@ -378,4 +377,4 @@ class TestEndpointCreator(unittest.TestCase):
endpoint_url='https://example.com',
http_session_cls=self.mock_session, socket_options=socket_options)
session_args = self.mock_session.call_args[1]
- self.assertEqual(session_args.get('socket_options'), socket_options)
+ assert session_args.get('socket_options') == socket_options
diff --git a/tests/unit/test_errorfactory.py b/tests/unit/test_errorfactory.py
index f8c78dbb..0fa01333 100644
--- a/tests/unit/test_errorfactory.py
+++ b/tests/unit/test_errorfactory.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
from botocore.compat import six
from botocore.exceptions import ClientError
@@ -25,23 +26,20 @@ class TestBaseClientExceptions(unittest.TestCase):
self.exceptions = BaseClientExceptions(self.code_to_exception)
def test_has_client_error(self):
- self.assertIs(self.exceptions.ClientError, ClientError)
+ assert self.exceptions.ClientError is ClientError
def test_from_code(self):
exception_cls = type('MyException', (ClientError,), {})
self.code_to_exception['MyExceptionCode'] = exception_cls
- self.assertIs(
- self.exceptions.from_code('MyExceptionCode'), exception_cls)
+ assert self.exceptions.from_code('MyExceptionCode') is exception_cls
def test_from_code_nonmatch_defaults_to_client_error(self):
- self.assertIs(
- self.exceptions.from_code('SomeUnknownErrorCode'), ClientError)
+ assert self.exceptions.from_code('SomeUnknownErrorCode') is ClientError
def test_gettattr_message(self):
exception_cls = type('MyException', (ClientError,), {})
self.code_to_exception['MyExceptionCode'] = exception_cls
- with six.assertRaisesRegex(self,
- AttributeError, 'Valid exceptions are: MyException'):
+ with pytest.raises(AttributeError, match='Valid exceptions are: MyException'):
self.exceptions.SomeUnmodeledError
@@ -100,29 +98,27 @@ class TestClientExceptionsFactory(unittest.TestCase):
def test_creates_modeled_exception(self):
exceptions = self.exceptions_factory.create_client_exceptions(
self.service_model)
- self.assertTrue(hasattr(exceptions, 'ExceptionWithModeledCode'))
+ assert hasattr(exceptions, 'ExceptionWithModeledCode')
modeled_exception = exceptions.ExceptionWithModeledCode
- self.assertEqual(
- modeled_exception.__name__, 'ExceptionWithModeledCode')
- self.assertTrue(issubclass(modeled_exception, ClientError))
+ assert modeled_exception.__name__ == 'ExceptionWithModeledCode'
+ assert issubclass(modeled_exception, ClientError)
def test_collects_modeled_exceptions_for_all_operations(self):
exceptions = self.exceptions_factory.create_client_exceptions(
self.service_model)
# Make sure exceptions were added for all operations by checking
# an exception only found on an a different operation.
- self.assertTrue(hasattr(exceptions, 'ExceptionForAnotherOperation'))
+ assert hasattr(exceptions, 'ExceptionForAnotherOperation')
modeled_exception = exceptions.ExceptionForAnotherOperation
- self.assertEqual(
- modeled_exception.__name__, 'ExceptionForAnotherOperation')
- self.assertTrue(issubclass(modeled_exception, ClientError))
+ assert modeled_exception.__name__ == 'ExceptionForAnotherOperation'
+ assert issubclass(modeled_exception, ClientError)
def test_creates_modeled_exception_mapping_that_has_code(self):
exceptions = self.exceptions_factory.create_client_exceptions(
self.service_model)
exception = exceptions.from_code('ModeledCode')
- self.assertEqual(exception.__name__, 'ExceptionWithModeledCode')
- self.assertTrue(issubclass(exception, ClientError))
+ assert exception.__name__ == 'ExceptionWithModeledCode'
+ assert issubclass(exception, ClientError)
def test_creates_modeled_exception_mapping_that_has_no_code(self):
exceptions = self.exceptions_factory.create_client_exceptions(
@@ -130,5 +126,5 @@ class TestClientExceptionsFactory(unittest.TestCase):
# For exceptions that do not have an explicit code associated to them,
# the code is the name of the exception.
exception = exceptions.from_code('ExceptionMissingCode')
- self.assertEqual(exception.__name__, 'ExceptionMissingCode')
- self.assertTrue(issubclass(exception, ClientError))
+ assert exception.__name__ == 'ExceptionMissingCode'
+ assert issubclass(exception, ClientError)
diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py
index a81fbf85..eedb90e1 100644
--- a/tests/unit/test_exceptions.py
+++ b/tests/unit/test_exceptions.py
@@ -88,20 +88,18 @@ class TestPickleExceptions(unittest.TestCase):
exception = botocore.exceptions.DataNotFoundError(
data_path='mypath')
unpickled_exception = pickle.loads(pickle.dumps(exception))
- self.assertIsInstance(
- unpickled_exception, botocore.exceptions.DataNotFoundError)
- self.assertEqual(str(unpickled_exception), str(exception))
- self.assertEqual(unpickled_exception.kwargs, exception.kwargs)
+ assert isinstance(unpickled_exception, botocore.exceptions.DataNotFoundError)
+ assert str(unpickled_exception) == str(exception)
+ assert unpickled_exception.kwargs == exception.kwargs
def test_multiple_kwarg_botocore_error(self):
exception = botocore.exceptions.UnknownServiceError(
service_name='myservice', known_service_names=['s3']
)
unpickled_exception = pickle.loads(pickle.dumps(exception))
- self.assertIsInstance(
- unpickled_exception, botocore.exceptions.UnknownServiceError)
- self.assertEqual(str(unpickled_exception), str(exception))
- self.assertEqual(unpickled_exception.kwargs, exception.kwargs)
+ assert isinstance(unpickled_exception, botocore.exceptions.UnknownServiceError)
+ assert str(unpickled_exception) == str(exception)
+ assert unpickled_exception.kwargs == exception.kwargs
def test_client_error(self):
exception = botocore.exceptions.ClientError(
@@ -110,12 +108,10 @@ class TestPickleExceptions(unittest.TestCase):
operation_name='myoperation'
)
unpickled_exception = pickle.loads(pickle.dumps(exception))
- self.assertIsInstance(
- unpickled_exception, botocore.exceptions.ClientError)
- self.assertEqual(str(unpickled_exception), str(exception))
- self.assertEqual(
- unpickled_exception.operation_name, exception.operation_name)
- self.assertEqual(unpickled_exception.response, exception.response)
+ assert isinstance(unpickled_exception, botocore.exceptions.ClientError)
+ assert str(unpickled_exception) == str(exception)
+ assert unpickled_exception.operation_name == exception.operation_name
+ assert unpickled_exception.response == exception.response
def test_dynamic_client_error(self):
session = botocore.session.Session()
@@ -126,12 +122,10 @@ class TestPickleExceptions(unittest.TestCase):
operation_name='myoperation'
)
unpickled_exception = pickle.loads(pickle.dumps(exception))
- self.assertIsInstance(
- unpickled_exception, botocore.exceptions.ClientError)
- self.assertEqual(str(unpickled_exception), str(exception))
- self.assertEqual(
- unpickled_exception.operation_name, exception.operation_name)
- self.assertEqual(unpickled_exception.response, exception.response)
+ assert isinstance(unpickled_exception, botocore.exceptions.ClientError)
+ assert str(unpickled_exception) == str(exception)
+ assert unpickled_exception.operation_name == exception.operation_name
+ assert unpickled_exception.response == exception.response
def test_http_client_error(self):
exception = botocore.exceptions.HTTPClientError(
@@ -145,16 +139,11 @@ class TestPickleExceptions(unittest.TestCase):
error='error'
)
unpickled_exception = pickle.loads(pickle.dumps(exception))
- self.assertIsInstance(
- unpickled_exception,
- botocore.exceptions.HTTPClientError
- )
- self.assertEqual(str(unpickled_exception), str(exception))
- self.assertEqual(unpickled_exception.kwargs, exception.kwargs)
+ assert isinstance(unpickled_exception, botocore.exceptions.HTTPClientError)
+ assert str(unpickled_exception) == str(exception)
+ assert unpickled_exception.kwargs == exception.kwargs
# The request/response properties on the HTTPClientError do not have
# __eq__ defined so we want to make sure properties are at least
# of the expected type
- self.assertIsInstance(
- unpickled_exception.request, botocore.awsrequest.AWSRequest)
- self.assertIsInstance(
- unpickled_exception.response, botocore.awsrequest.AWSResponse)
+ assert isinstance(unpickled_exception.request, botocore.awsrequest.AWSRequest)
+ assert isinstance(unpickled_exception.response, botocore.awsrequest.AWSResponse)
diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py
index 7f7aa3a4..eeee6270 100644
--- a/tests/unit/test_handlers.py
+++ b/tests/unit/test_handlers.py
@@ -18,6 +18,7 @@ from tests import mock
import copy
import os
import json
+import pytest
import botocore
import botocore.session
@@ -47,18 +48,18 @@ class TestHandlers(BaseSessionTest):
def test_get_console_output(self):
parsed = {'Output': base64.b64encode(b'foobar').decode('utf-8')}
handlers.decode_console_output(parsed)
- self.assertEqual(parsed['Output'], 'foobar')
+ assert parsed['Output'] == 'foobar'
def test_get_console_output_cant_be_decoded(self):
parsed = {'Output': 1}
handlers.decode_console_output(parsed)
- self.assertEqual(parsed['Output'], 1)
+ assert parsed['Output'] == 1
def test_get_console_output_bad_unicode_errors(self):
original = base64.b64encode(b'before\xffafter').decode('utf-8')
parsed = {'Output': original}
handlers.decode_console_output(parsed)
- self.assertEqual(parsed['Output'], u'before\ufffdafter')
+ assert parsed['Output'] == u'before\ufffdafter'
def test_noop_if_output_key_does_not_exist(self):
original = {'foo': 'bar'}
@@ -67,52 +68,49 @@ class TestHandlers(BaseSessionTest):
handlers.decode_console_output(parsed)
# Should be unchanged because the 'Output'
# key is not in the output.
- self.assertEqual(parsed, original)
+ assert parsed == original
def test_decode_quoted_jsondoc(self):
value = quote('{"foo":"bar"}')
converted_value = handlers.decode_quoted_jsondoc(value)
- self.assertEqual(converted_value, {'foo': 'bar'})
+ assert converted_value == {'foo': 'bar'}
def test_cant_decode_quoted_jsondoc(self):
value = quote('{"foo": "missing end quote}')
converted_value = handlers.decode_quoted_jsondoc(value)
- self.assertEqual(converted_value, value)
+ assert converted_value == value
def test_disable_signing(self):
- self.assertEqual(handlers.disable_signing(), botocore.UNSIGNED)
+ assert handlers.disable_signing() == botocore.UNSIGNED
def test_only_quote_url_path_not_version_id(self):
params = {'CopySource': '/foo/bar++baz?versionId=123'}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'],
- '/foo/bar%2B%2Bbaz?versionId=123')
+ assert params['CopySource'] == '/foo/bar%2B%2Bbaz?versionId=123'
def test_only_version_id_is_special_cased(self):
params = {'CopySource': '/foo/bar++baz?notVersion=foo+'}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'],
- '/foo/bar%2B%2Bbaz%3FnotVersion%3Dfoo%2B')
+ assert params['CopySource'] == '/foo/bar%2B%2Bbaz%3FnotVersion%3Dfoo%2B'
def test_copy_source_with_multiple_questions(self):
params = {'CopySource': '/foo/bar+baz?a=baz+?versionId=a+'}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'],
- '/foo/bar%2Bbaz%3Fa%3Dbaz%2B?versionId=a+')
+ assert params['CopySource'] == '/foo/bar%2Bbaz%3Fa%3Dbaz%2B?versionId=a+'
def test_copy_source_supports_dict(self):
params = {
'CopySource': {'Bucket': 'foo', 'Key': 'keyname+'}
}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'], 'foo/keyname%2B')
+ assert params['CopySource'] == 'foo/keyname%2B'
def test_copy_source_ignored_if_not_dict(self):
params = {
'CopySource': 'stringvalue'
}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'], 'stringvalue')
+ assert params['CopySource'] == 'stringvalue'
def test_copy_source_supports_optional_version_id(self):
params = {
@@ -121,20 +119,18 @@ class TestHandlers(BaseSessionTest):
'VersionId': 'asdf+'}
}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'],
- # Note, versionId is not url encoded.
- 'foo/keyname%2B?versionId=asdf+')
+ # Note, versionId is not url encoded.
+ assert params['CopySource'] == 'foo/keyname%2B?versionId=asdf+'
def test_copy_source_has_validation_failure(self):
- with six.assertRaisesRegex(self, ParamValidationError, 'Key'):
+ with pytest.raises(ParamValidationError, match='Key'):
handlers.handle_copy_source_param(
{'CopySource': {'Bucket': 'foo'}})
def test_quote_source_header_needs_no_changes(self):
params = {'CopySource': '/foo/bar?versionId=123'}
handlers.handle_copy_source_param(params)
- self.assertEqual(params['CopySource'],
- '/foo/bar?versionId=123')
+ assert params['CopySource'] == '/foo/bar?versionId=123'
def test_presigned_url_already_present_ec2(self):
operation_model = mock.Mock()
@@ -147,7 +143,7 @@ class TestHandlers(BaseSessionTest):
credentials, event_emitter)
handlers.inject_presigned_url_ec2(
params, request_signer, operation_model)
- self.assertEqual(params['body']['PresignedUrl'], 'https://foo')
+ assert params['body']['PresignedUrl'] == 'https://foo'
def test_presigned_url_with_source_region_ec2(self):
operation_model = mock.Mock()
@@ -165,8 +161,8 @@ class TestHandlers(BaseSessionTest):
event_emitter)
handlers.inject_presigned_url_ec2(
params, request_signer, operation_model)
- self.assertEqual(params['body']['PresignedUrl'], 'https://foo')
- self.assertEqual(params['body']['SourceRegion'], 'us-east-1')
+ assert params['body']['PresignedUrl'] == 'https://foo'
+ assert params['body']['SourceRegion'] == 'us-east-1'
def test_presigned_url_already_present_rds(self):
operation_model = mock.Mock()
@@ -179,7 +175,7 @@ class TestHandlers(BaseSessionTest):
event_emitter)
handlers.inject_presigned_url_rds(
params, request_signer, operation_model)
- self.assertEqual(params['body']['PreSignedUrl'], 'https://foo')
+ assert params['body']['PreSignedUrl'] == 'https://foo'
def test_presigned_url_with_source_region_rds(self):
operation_model = mock.Mock()
@@ -197,8 +193,8 @@ class TestHandlers(BaseSessionTest):
event_emitter)
handlers.inject_presigned_url_rds(
params, request_signer, operation_model)
- self.assertEqual(params['body']['PreSignedUrl'], 'https://foo')
- self.assertNotIn('SourceRegion', params['body'])
+ assert params['body']['PreSignedUrl'] == 'https://foo'
+ assert 'SourceRegion' not in params['body']
def test_inject_presigned_url_ec2(self):
operation_model = mock.Mock()
@@ -219,14 +215,13 @@ class TestHandlers(BaseSessionTest):
handlers.inject_presigned_url_ec2(
request_dict, request_signer, operation_model)
- self.assertIn('https://ec2.us-west-2.amazonaws.com?',
- params['PresignedUrl'])
+ assert 'https://ec2.us-west-2.amazonaws.com?' in params['PresignedUrl']
self.assertIn('X-Amz-Signature',
params['PresignedUrl'])
self.assertIn('DestinationRegion', params['PresignedUrl'])
# We should also populate the DestinationRegion with the
# region_name of the endpoint object.
- self.assertEqual(params['DestinationRegion'], 'us-east-1')
+ assert params['DestinationRegion'] == 'us-east-1'
def test_use_event_operation_name(self):
operation_model = mock.Mock()
@@ -246,7 +241,7 @@ class TestHandlers(BaseSessionTest):
call_args = request_signer.generate_presigned_url.call_args
operation_name = call_args[1].get('operation_name')
- self.assertEqual(operation_name, 'FakeOperation')
+ assert operation_name == 'FakeOperation'
def test_destination_region_always_changed(self):
# If the user provides a destination region, we will still
@@ -276,12 +271,11 @@ class TestHandlers(BaseSessionTest):
handlers.inject_presigned_url_ec2(
request_dict, request_signer, operation_model)
- self.assertIn('https://ec2.us-west-2.amazonaws.com?',
- params['PresignedUrl'])
+ assert 'https://ec2.us-west-2.amazonaws.com?' in params['PresignedUrl']
# Always use the DestinationRegion from the endpoint, regardless of
# whatever value the user provides.
- self.assertEqual(params['DestinationRegion'], actual_region)
+ assert params['DestinationRegion'] == actual_region
def test_inject_presigned_url_rds(self):
operation_model = mock.Mock()
@@ -302,13 +296,11 @@ class TestHandlers(BaseSessionTest):
handlers.inject_presigned_url_rds(
request_dict, request_signer, operation_model)
- self.assertIn('https://rds.us-west-2.amazonaws.com?',
- params['PreSignedUrl'])
- self.assertIn('X-Amz-Signature',
- params['PreSignedUrl'])
- self.assertIn('DestinationRegion', params['PreSignedUrl'])
+ assert 'https://rds.us-west-2.amazonaws.com?' in params['PreSignedUrl']
+ assert 'X-Amz-Signature' in params['PreSignedUrl']
+ assert 'DestinationRegion' in params['PreSignedUrl']
# We should not populate the destination region for rds
- self.assertNotIn('DestinationRegion', params)
+ assert 'DestinationRegion' not in params
def test_source_region_removed(self):
operation_model = mock.Mock()
@@ -333,7 +325,7 @@ class TestHandlers(BaseSessionTest):
model=operation_model
)
- self.assertNotIn('SourceRegion', params)
+ assert 'SourceRegion' not in params
def test_source_region_removed_when_presigned_url_provided_for_rds(self):
operation_model = mock.Mock()
@@ -357,7 +349,7 @@ class TestHandlers(BaseSessionTest):
model=operation_model
)
- self.assertNotIn('SourceRegion', params)
+ assert 'SourceRegion' not in params
def test_dest_region_removed(self):
operation_model = mock.Mock()
@@ -381,7 +373,7 @@ class TestHandlers(BaseSessionTest):
model=operation_model
)
- self.assertNotIn('DestinationRegion', params)
+ assert 'DestinationRegion' not in params
def test_presigned_url_already_present_for_rds(self):
operation_model = mock.Mock()
@@ -397,7 +389,7 @@ class TestHandlers(BaseSessionTest):
request_signer=request_signer,
model=operation_model
)
- self.assertEqual(params['body']['PresignedUrl'], 'https://foo')
+ assert params['body']['PresignedUrl'] == 'https://foo'
def test_presigned_url_casing_changed_for_rds(self):
operation_model = mock.Mock()
@@ -421,10 +413,9 @@ class TestHandlers(BaseSessionTest):
model=operation_model
)
- self.assertNotIn('PresignedUrl', params)
- self.assertIn('https://rds.us-west-2.amazonaws.com?',
- params['PreSignedUrl'])
- self.assertIn('X-Amz-Signature', params['PreSignedUrl'])
+ assert 'PresignedUrl' not in params
+ assert 'https://rds.us-west-2.amazonaws.com?' in params['PreSignedUrl']
+ assert 'X-Amz-Signature' in params['PreSignedUrl']
def test_500_status_code_set_for_200_response(self):
http_response = mock.Mock()
@@ -438,7 +429,7 @@ class TestHandlers(BaseSessionTest):
</Error>
"""
handlers.check_for_200_error((http_response, {}))
- self.assertEqual(http_response.status_code, 500)
+ assert http_response.status_code == 500
def test_200_response_with_no_error_left_untouched(self):
http_response = mock.Mock()
@@ -446,7 +437,7 @@ class TestHandlers(BaseSessionTest):
http_response.content = "<NotAnError></NotAnError>"
handlers.check_for_200_error((http_response, {}))
# We don't touch the status code since there are no errors present.
- self.assertEqual(http_response.status_code, 200)
+ assert http_response.status_code == 200
def test_500_response_can_be_none(self):
# A 500 response can raise an exception, which means the response
@@ -503,13 +494,13 @@ class TestHandlers(BaseSessionTest):
model = OperationModel(operation_def, ServiceModel(service_def))
self.session.emit(event, params=params, model=model)
- self.assertEqual(params['Id'], 'ABC123')
- self.assertEqual(params['HostedZoneId'], 'ABC123')
- self.assertEqual(params['ResourceId'], 'DEF456')
- self.assertEqual(params['DelegationSetId'], 'GHI789')
+ assert params['Id'] == 'ABC123'
+ assert params['HostedZoneId'] == 'ABC123'
+ assert params['ResourceId'] == 'DEF456'
+ assert params['DelegationSetId'] == 'GHI789'
# This one should have been left alone
- self.assertEqual(params['Other'], '/hostedzone/foo')
+ assert params['Other'] == '/hostedzone/foo'
def test_route53_resource_id_missing_input_shape(self):
event = 'before-parameter-build.route53.GetHostedZone'
@@ -524,7 +515,7 @@ class TestHandlers(BaseSessionTest):
model = OperationModel(operation_def, ServiceModel(service_def))
self.session.emit(event, params=params, model=model)
- self.assertEqual(params['HostedZoneId'], '/hostedzone/ABC123')
+ assert params['HostedZoneId'] == '/hostedzone/ABC123'
def test_run_instances_userdata(self):
user_data = 'This is a test'
@@ -536,7 +527,7 @@ class TestHandlers(BaseSessionTest):
'MinCount': 1,
'MaxCount': 5,
'UserData': b64_user_data}
- self.assertEqual(params, result)
+ assert params == result
def test_run_instances_userdata_blob(self):
# Ensure that binary can be passed in as user data.
@@ -551,7 +542,7 @@ class TestHandlers(BaseSessionTest):
'MinCount': 1,
'MaxCount': 5,
'UserData': b64_user_data}
- self.assertEqual(params, result)
+ assert params == result
def test_get_template_has_error_response(self):
original = {'Error': {'Code': 'Message'}}
@@ -559,7 +550,7 @@ class TestHandlers(BaseSessionTest):
handlers.json_decode_template_body(parsed=handler_input)
# The handler should not have changed the response because it's
# an error response.
- self.assertEqual(original, handler_input)
+ assert original == handler_input
def test_does_decode_template_body_in_order(self):
expected_ordering = OrderedDict([
@@ -575,9 +566,9 @@ class TestHandlers(BaseSessionTest):
handlers.json_decode_template_body(parsed=parsed_response)
result = parsed_response['TemplateBody']
- self.assertTrue(isinstance(result, OrderedDict))
+ assert isinstance(result, OrderedDict)
for element, expected_element in zip(result, expected_ordering):
- self.assertEqual(element, expected_element)
+ assert element == expected_element
def test_decode_json_policy(self):
parsed = {
@@ -613,21 +604,21 @@ class TestHandlers(BaseSessionTest):
model = ServiceModel(service_def)
op_model = model.operation_model('Foo')
handlers.json_decode_policies(parsed, op_model)
- self.assertEqual(parsed['Document'], {'foo': 'foobarbaz'})
+ assert parsed['Document'] == {'foo': 'foobarbaz'}
no_document = {'Other': 'bar'}
handlers.json_decode_policies(no_document, op_model)
- self.assertEqual(no_document, {'Other': 'bar'})
+ assert no_document == {'Other': 'bar'}
def test_inject_account_id(self):
params = {}
handlers.inject_account_id(params)
- self.assertEqual(params['accountId'], '-')
+ assert params['accountId'] == '-'
def test_account_id_not_added_if_present(self):
params = {'accountId': 'foo'}
handlers.inject_account_id(params)
- self.assertEqual(params['accountId'], 'foo')
+ assert params['accountId'] == 'foo'
def test_glacier_version_header_added(self):
request_dict = {
@@ -635,22 +626,21 @@ class TestHandlers(BaseSessionTest):
}
model = ServiceModel({'metadata': {'apiVersion': '2012-01-01'}})
handlers.add_glacier_version(model, request_dict)
- self.assertEqual(request_dict['headers']['x-amz-glacier-version'],
- '2012-01-01')
+ assert request_dict['headers']['x-amz-glacier-version'] == '2012-01-01'
def test_application_json_header_added(self):
request_dict = {
'headers': {}
}
handlers.add_accept_header(None, request_dict)
- self.assertEqual(request_dict['headers']['Accept'], 'application/json')
+ assert request_dict['headers']['Accept'] == 'application/json'
def test_accept_header_not_added_if_present(self):
request_dict = {
'headers': {'Accept': 'application/yaml'}
}
handlers.add_accept_header(None, request_dict)
- self.assertEqual(request_dict['headers']['Accept'], 'application/yaml')
+ assert request_dict['headers']['Accept'] == 'application/yaml'
def test_glacier_checksums_added(self):
request_dict = {
@@ -658,16 +648,13 @@ class TestHandlers(BaseSessionTest):
'body': six.BytesIO(b'hello world'),
}
handlers.add_glacier_checksums(request_dict)
- self.assertIn('x-amz-content-sha256', request_dict['headers'])
- self.assertIn('x-amz-sha256-tree-hash', request_dict['headers'])
- self.assertEqual(
- request_dict['headers']['x-amz-content-sha256'],
- 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
- self.assertEqual(
- request_dict['headers']['x-amz-sha256-tree-hash'],
- 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
+ assert 'x-amz-content-sha256' in request_dict['headers']
+ assert 'x-amz-sha256-tree-hash' in request_dict['headers']
+ expected_val = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
+ assert request_dict['headers']['x-amz-content-sha256'] == expected_val
+ assert request_dict['headers']['x-amz-sha256-tree-hash'] == expected_val
# And verify that the body can still be read.
- self.assertEqual(request_dict['body'].read(), b'hello world')
+ assert request_dict['body'].read() == b'hello world'
def test_tree_hash_added_only_if_not_exists(self):
request_dict = {
@@ -677,8 +664,7 @@ class TestHandlers(BaseSessionTest):
'body': six.BytesIO(b'hello world'),
}
handlers.add_glacier_checksums(request_dict)
- self.assertEqual(request_dict['headers']['x-amz-sha256-tree-hash'],
- 'pre-exists')
+ assert request_dict['headers']['x-amz-sha256-tree-hash'] == 'pre-exists'
def test_checksum_added_only_if_not_exists(self):
request_dict = {
@@ -688,8 +674,7 @@ class TestHandlers(BaseSessionTest):
'body': six.BytesIO(b'hello world'),
}
handlers.add_glacier_checksums(request_dict)
- self.assertEqual(request_dict['headers']['x-amz-content-sha256'],
- 'pre-exists')
+ assert request_dict['headers']['x-amz-content-sha256'] == 'pre-exists'
def test_glacier_checksums_support_raw_bytes(self):
request_dict = {
@@ -697,12 +682,9 @@ class TestHandlers(BaseSessionTest):
'body': b'hello world',
}
handlers.add_glacier_checksums(request_dict)
- self.assertEqual(
- request_dict['headers']['x-amz-content-sha256'],
- 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
- self.assertEqual(
- request_dict['headers']['x-amz-sha256-tree-hash'],
- 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
+ expected_val = 'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9'
+ assert request_dict['headers']['x-amz-content-sha256'] == expected_val
+ assert request_dict['headers']['x-amz-sha256-tree-hash'] == expected_val
def test_switch_host_with_param(self):
request = AWSRequest()
@@ -712,7 +694,7 @@ class TestHandlers(BaseSessionTest):
request.data = data.encode('utf-8')
request.url = url
handlers.switch_host_with_param(request, 'PredictEndpoint')
- self.assertEqual(request.url, new_endpoint)
+ assert request.url == new_endpoint
def test_invalid_char_in_bucket_raises_exception(self):
params = {
@@ -720,7 +702,7 @@ class TestHandlers(BaseSessionTest):
'Key': 'foo',
'Body': b'asdf',
}
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
handlers.validate_bucket_name(params)
def test_bucket_too_long_raises_exception(self):
@@ -729,7 +711,7 @@ class TestHandlers(BaseSessionTest):
'Key': 'foo',
'Body': b'asdf',
}
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
handlers.validate_bucket_name(params)
def test_not_dns_compat_but_still_valid_bucket_name(self):
@@ -738,19 +720,16 @@ class TestHandlers(BaseSessionTest):
'Key': 'foo',
'Body': b'asdf',
}
- self.assertIsNone(handlers.validate_bucket_name(params))
+ assert handlers.validate_bucket_name(params) is None
def test_valid_bucket_name_hyphen(self):
- self.assertIsNone(
- handlers.validate_bucket_name({'Bucket': 'my-bucket-name'}))
+ assert handlers.validate_bucket_name({'Bucket': 'my-bucket-name'}) is None
def test_valid_bucket_name_underscore(self):
- self.assertIsNone(
- handlers.validate_bucket_name({'Bucket': 'my_bucket_name'}))
+ assert handlers.validate_bucket_name({'Bucket': 'my_bucket_name'}) is None
def test_valid_bucket_name_period(self):
- self.assertIsNone(
- handlers.validate_bucket_name({'Bucket': 'my.bucket.name'}))
+ assert handlers.validate_bucket_name({'Bucket': 'my.bucket.name'}) is None
def test_validation_is_noop_if_no_bucket_param_exists(self):
self.assertIsNone(handlers.validate_bucket_name(params={}))
@@ -773,21 +752,21 @@ class TestHandlers(BaseSessionTest):
self.fail('The s3 arn: %s should pass validation' % arn)
def test_validation_is_global_s3_bucket_arn(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
arn = 'arn:aws:s3:::mybucket'
handlers.validate_bucket_name({'Bucket': arn})
def test_validation_is_other_service_arn(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
arn = 'arn:aws:ec2:us-west-2:123456789012:instance:myinstance'
handlers.validate_bucket_name({'Bucket': arn})
def test_validate_non_ascii_metadata_values(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
handlers.validate_ascii_metadata({'Metadata': {'foo': u'\u2713'}})
def test_validate_non_ascii_metadata_keys(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
handlers.validate_ascii_metadata(
{'Metadata': {u'\u2713': 'bar'}})
@@ -795,24 +774,24 @@ class TestHandlers(BaseSessionTest):
original = {'NotMetadata': ''}
copied = original.copy()
handlers.validate_ascii_metadata(copied)
- self.assertEqual(original, copied)
+ assert original == copied
def test_validation_passes_when_all_ascii_chars(self):
original = {'Metadata': {'foo': 'bar'}}
copied = original.copy()
handlers.validate_ascii_metadata(original)
- self.assertEqual(original, copied)
+ assert original == copied
def test_set_encoding_type(self):
params = {}
context = {}
handlers.set_list_objects_encoding_type_url(params, context=context)
- self.assertEqual(params['EncodingType'], 'url')
- self.assertTrue(context['encoding_type_auto_set'])
+ assert params['EncodingType'] == 'url'
+ assert context['encoding_type_auto_set']
params['EncodingType'] = 'new_value'
handlers.set_list_objects_encoding_type_url(params, context={})
- self.assertEqual(params['EncodingType'], 'new_value')
+ assert params['EncodingType'] == 'new_value'
def test_decode_list_objects(self):
parsed = {
@@ -821,7 +800,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object(parsed, context=context)
- self.assertEqual(parsed['Contents'][0]['Key'], u'\xe7\xf6s%asd\x08')
+ assert parsed['Contents'][0]['Key'] == u'\xe7\xf6s%asd\x08'
def test_decode_list_objects_does_not_decode_without_context(self):
parsed = {
@@ -829,7 +808,7 @@ class TestHandlers(BaseSessionTest):
'EncodingType': 'url',
}
handlers.decode_list_object(parsed, context={})
- self.assertEqual(parsed['Contents'][0]['Key'], u'%C3%A7%C3%B6s%25asd')
+ assert parsed['Contents'][0]['Key'] == u'%C3%A7%C3%B6s%25asd'
def test_decode_list_objects_with_marker(self):
parsed = {
@@ -838,7 +817,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object(parsed, context=context)
- self.assertEqual(parsed['Marker'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['Marker'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_with_nextmarker(self):
parsed = {
@@ -847,7 +826,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object(parsed, context=context)
- self.assertEqual(parsed['NextMarker'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['NextMarker'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_with_common_prefixes(self):
parsed = {
@@ -856,8 +835,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object(parsed, context=context)
- self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'],
- u'\xe7\xf6s% asd\x08 c')
+ assert parsed['CommonPrefixes'][0]['Prefix'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_with_delimiter(self):
parsed = {
@@ -866,7 +844,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object(parsed, context=context)
- self.assertEqual(parsed['Delimiter'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['Delimiter'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_v2(self):
parsed = {
@@ -875,7 +853,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(parsed['Contents'][0]['Key'], u'\xe7\xf6s%asd\x08')
+ assert parsed['Contents'][0]['Key'] == u'\xe7\xf6s%asd\x08'
def test_decode_list_objects_v2_does_not_decode_without_context(self):
parsed = {
@@ -883,7 +861,7 @@ class TestHandlers(BaseSessionTest):
'EncodingType': 'url',
}
handlers.decode_list_object_v2(parsed, context={})
- self.assertEqual(parsed['Contents'][0]['Key'], u'%C3%A7%C3%B6s%25asd')
+ assert parsed['Contents'][0]['Key'] == u'%C3%A7%C3%B6s%25asd'
def test_decode_list_objects_v2_with_delimiter(self):
parsed = {
@@ -892,7 +870,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(parsed['Delimiter'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['Delimiter'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_v2_with_prefix(self):
parsed = {
@@ -901,7 +879,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(parsed['Prefix'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['Prefix'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_v2_does_not_decode_continuationtoken(self):
parsed = {
@@ -910,8 +888,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(
- parsed['ContinuationToken'], u"%C3%A7%C3%B6s%25%20asd%08+c")
+ assert parsed['ContinuationToken'] == u"%C3%A7%C3%B6s%25%20asd%08+c"
def test_decode_list_objects_v2_with_startafter(self):
parsed = {
@@ -920,7 +897,7 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(parsed['StartAfter'], u'\xe7\xf6s% asd\x08 c')
+ assert parsed['StartAfter'] == u'\xe7\xf6s% asd\x08 c'
def test_decode_list_objects_v2_with_common_prefixes(self):
parsed = {
@@ -929,62 +906,61 @@ class TestHandlers(BaseSessionTest):
}
context = {'encoding_type_auto_set': True}
handlers.decode_list_object_v2(parsed, context=context)
- self.assertEqual(parsed['CommonPrefixes'][0]['Prefix'],
- u'\xe7\xf6s% asd\x08 c')
+ assert parsed['CommonPrefixes'][0]['Prefix'] == u'\xe7\xf6s% asd\x08 c'
def test_set_operation_specific_signer_no_auth_type(self):
signing_name = 'myservice'
context = {'auth_type': None}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertIsNone(response)
+ assert response is None
def test_set_operation_specific_signer_unsigned(self):
signing_name = 'myservice'
context = {'auth_type': 'none'}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertEqual(response, botocore.UNSIGNED)
+ assert response == botocore.UNSIGNED
def test_set_operation_specific_signer_v4(self):
signing_name = 'myservice'
context = {'auth_type': 'v4'}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertEqual(response, 'v4')
+ assert response == 'v4'
def test_set_operation_specific_signer_s3v4(self):
signing_name = 's3'
context = {'auth_type': 'v4'}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertEqual(response, 's3v4')
+ assert response == 's3v4'
def test_set_operation_specific_signer_v4_unsinged_payload(self):
signing_name = 'myservice'
context = {'auth_type': 'v4-unsigned-body'}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertEqual(response, 'v4')
- self.assertEqual(context.get('payload_signing_enabled'), False)
+ assert response == 'v4'
+ assert context.get('payload_signing_enabled') is False
def test_set_operation_specific_signer_s3v4_unsigned_payload(self):
signing_name = 's3'
context = {'auth_type': 'v4-unsigned-body'}
response = handlers.set_operation_specific_signer(
context=context, signing_name=signing_name)
- self.assertEqual(response, 's3v4')
- self.assertEqual(context.get('payload_signing_enabled'), False)
+ assert response == 's3v4'
+ assert context.get('payload_signing_enabled') is False
class TestConvertStringBodyToFileLikeObject(BaseSessionTest):
def assert_converts_to_file_like_object_with_bytes(self, body, body_bytes):
params = {'Body': body}
handlers.convert_body_to_file_like_object(params)
- self.assertTrue(hasattr(params['Body'], 'read'))
+ assert hasattr(params['Body'], 'read')
contents = params['Body'].read()
- self.assertIsInstance(contents, six.binary_type)
- self.assertEqual(contents, body_bytes)
+ assert isinstance(contents, six.binary_type)
+ assert contents == body_bytes
def test_string(self):
self.assert_converts_to_file_like_object_with_bytes('foo', b'foo')
@@ -998,7 +974,7 @@ class TestConvertStringBodyToFileLikeObject(BaseSessionTest):
body = six.StringIO()
params = {'Body': body}
handlers.convert_body_to_file_like_object(params)
- self.assertEqual(params['Body'], body)
+ assert params['Body'] == body
def test_unicode(self):
self.assert_converts_to_file_like_object_with_bytes(u'bar', b'bar')
@@ -1036,13 +1012,13 @@ class TestRetryHandlerOrder(BaseSessionTest):
# Technically, as long as the relative order is preserved, we don't
# care about the absolute order.
names = self.get_handler_names(responses)
- self.assertIn('check_for_200_error', names)
- self.assertIn('RetryHandler', names)
+ assert 'check_for_200_error' in names
+ assert 'RetryHandler' in names
s3_200_handler = names.index('check_for_200_error')
general_retry_handler = names.index('RetryHandler')
- self.assertTrue(s3_200_handler < general_retry_handler,
- "S3 200 error handler was supposed to be before "
- "the general retry handler, but it was not.")
+ message = ("S3 200 error handler was supposed to be"
+ "before the general retry handler, but it was not.")
+ assert s3_200_handler < general_retry_handler, messge
class BaseMD5Test(BaseSessionTest):
@@ -1076,10 +1052,10 @@ class TestSSEMD5(BaseMD5Test):
def test_raises_error_when_md5_unavailable(self):
self.set_md5_available(False)
- with self.assertRaises(MD5UnavailableError):
+ with pytest.raises(MD5UnavailableError):
handlers.sse_md5({'SSECustomerKey': b'foo'})
- with self.assertRaises(MD5UnavailableError):
+ with pytest.raises(MD5UnavailableError):
handlers.copy_source_sse_md5({'CopySourceSSECustomerKey': b'foo'})
def test_sse_params(self):
@@ -1089,16 +1065,16 @@ class TestSSEMD5(BaseMD5Test):
params = {'SSECustomerKey': b'bar',
'SSECustomerAlgorithm': 'AES256'}
self.session.emit(event, params=params, model=mock.MagicMock())
- self.assertEqual(params['SSECustomerKey'], 'YmFy')
- self.assertEqual(params['SSECustomerKeyMD5'], 'Zm9v')
+ assert params['SSECustomerKey'] == 'YmFy'
+ assert params['SSECustomerKeyMD5'] == 'Zm9v'
def test_sse_params_as_str(self):
event = 'before-parameter-build.s3.PutObject'
params = {'SSECustomerKey': 'bar',
'SSECustomerAlgorithm': 'AES256'}
self.session.emit(event, params=params, model=mock.MagicMock())
- self.assertEqual(params['SSECustomerKey'], 'YmFy')
- self.assertEqual(params['SSECustomerKeyMD5'], 'Zm9v')
+ assert params['SSECustomerKey'] == 'YmFy'
+ assert params['SSECustomerKeyMD5'] == 'Zm9v'
def test_copy_source_sse_params(self):
for op in ['CopyObject', 'UploadPartCopy']:
@@ -1106,16 +1082,16 @@ class TestSSEMD5(BaseMD5Test):
params = {'CopySourceSSECustomerKey': b'bar',
'CopySourceSSECustomerAlgorithm': 'AES256'}
self.session.emit(event, params=params, model=mock.MagicMock())
- self.assertEqual(params['CopySourceSSECustomerKey'], 'YmFy')
- self.assertEqual(params['CopySourceSSECustomerKeyMD5'], 'Zm9v')
+ assert params['CopySourceSSECustomerKey'] == 'YmFy'
+ assert params['CopySourceSSECustomerKeyMD5'] == 'Zm9v'
def test_copy_source_sse_params_as_str(self):
event = 'before-parameter-build.s3.CopyObject'
params = {'CopySourceSSECustomerKey': 'bar',
'CopySourceSSECustomerAlgorithm': 'AES256'}
self.session.emit(event, params=params, model=mock.MagicMock())
- self.assertEqual(params['CopySourceSSECustomerKey'], 'YmFy')
- self.assertEqual(params['CopySourceSSECustomerKeyMD5'], 'Zm9v')
+ assert params['CopySourceSSECustomerKey'] == 'YmFy'
+ assert params['CopySourceSSECustomerKeyMD5'] == 'Zm9v'
class TestAddMD5(BaseMD5Test):
@@ -1137,7 +1113,7 @@ class TestAddMD5(BaseMD5Test):
context = self.get_context()
conditionally_calculate_md5(
request_dict, request_signer=request_signer, context=context)
- self.assertTrue('Content-MD5' in request_dict['headers'])
+ assert 'Content-MD5' in request_dict['headers']
def test_adds_md5_when_s3v4(self):
credentials = Credentials('key', 'secret')
@@ -1151,7 +1127,7 @@ class TestAddMD5(BaseMD5Test):
context = self.get_context({'payload_signing_enabled': False})
conditionally_calculate_md5(
request_dict, request_signer=request_signer, context=context)
- self.assertTrue('Content-MD5' in request_dict['headers'])
+ assert 'Content-MD5' in request_dict['headers']
def test_conditional_does_not_add_when_md5_unavailable(self):
credentials = Credentials('key', 'secret')
@@ -1167,7 +1143,7 @@ class TestAddMD5(BaseMD5Test):
with mock.patch('botocore.utils.MD5_AVAILABLE', False):
conditionally_calculate_md5(
request_dict, request_signer=request_signer, context=context)
- self.assertFalse('Content-MD5' in request_dict['headers'])
+ assert 'Content-MD5' not in request_dict['headers']
def test_add_md5_raises_error_when_md5_unavailable(self):
credentials = Credentials('key', 'secret')
@@ -1179,7 +1155,7 @@ class TestAddMD5(BaseMD5Test):
'headers': {}}
self.set_md5_available(False)
- with self.assertRaises(MD5UnavailableError):
+ with pytest.raises(MD5UnavailableError):
conditionally_calculate_md5(
request_dict, request_signer=request_signer)
@@ -1194,7 +1170,7 @@ class TestAddMD5(BaseMD5Test):
context = self.get_context()
conditionally_calculate_md5(
request_dict, request_signer=request_signer, context=context)
- self.assertTrue('Content-MD5' in request_dict['headers'])
+ assert 'Content-MD5' in request_dict['headers']
def test_add_md5_with_file_like_body(self):
request_dict = {
@@ -1203,8 +1179,7 @@ class TestAddMD5(BaseMD5Test):
}
self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?'
conditionally_calculate_md5(request_dict)
- self.assertEqual(request_dict['headers']['Content-MD5'],
- 'OFj2IjCsPJFfMAxmQxLGPw==')
+ assert request_dict['headers']['Content-MD5'] == 'OFj2IjCsPJFfMAxmQxLGPw=='
def test_add_md5_with_bytes_object(self):
request_dict = {
@@ -1213,9 +1188,7 @@ class TestAddMD5(BaseMD5Test):
}
self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?'
conditionally_calculate_md5(request_dict)
- self.assertEqual(
- request_dict['headers']['Content-MD5'],
- 'OFj2IjCsPJFfMAxmQxLGPw==')
+ assert request_dict['headers']['Content-MD5'] == 'OFj2IjCsPJFfMAxmQxLGPw=='
def test_add_md5_with_empty_body(self):
request_dict = {
@@ -1235,9 +1208,7 @@ class TestAddMD5(BaseMD5Test):
}
self.md5_digest.return_value = b'8X\xf6"0\xac<\x91_0\x0cfC\x12\xc6?'
conditionally_calculate_md5(request_dict)
- self.assertEqual(
- request_dict['headers']['Content-MD5'],
- 'OFj2IjCsPJFfMAxmQxLGPw==')
+ assert request_dict['headers']['Content-MD5'] == 'OFj2IjCsPJFfMAxmQxLGPw=='
class TestParameterAlias(unittest.TestCase):
@@ -1259,14 +1230,14 @@ class TestParameterAlias(unittest.TestCase):
params = {self.alias_name: value}
self.parameter_alias.alias_parameter_in_call(
params, self.operation_model)
- self.assertEqual(params, {self.original_name: value})
+ assert params == {self.original_name: value}
def test_alias_parameter_and_original_in_call(self):
params = {
self.original_name: 'orginal_value',
self.alias_name: 'alias_value'
}
- with self.assertRaises(AliasConflictParameterError):
+ with pytest.raises(AliasConflictParameterError):
self.parameter_alias.alias_parameter_in_call(
params, self.operation_model)
@@ -1275,7 +1246,7 @@ class TestParameterAlias(unittest.TestCase):
params = {self.original_name: value}
self.parameter_alias.alias_parameter_in_call(
params, self.operation_model)
- self.assertEqual(params, {self.original_name: value})
+ assert params == {self.original_name: value}
def test_does_not_alias_parameter_for_no_input_shape(self):
value = 'value'
@@ -1283,7 +1254,7 @@ class TestParameterAlias(unittest.TestCase):
self.operation_model.input_shape = None
self.parameter_alias.alias_parameter_in_call(
params, self.operation_model)
- self.assertEqual(params, {self.alias_name: value})
+ assert params == {self.alias_name: value}
def test_does_not_alias_parameter_for_not_modeled_member(self):
value = 'value'
@@ -1294,7 +1265,7 @@ class TestParameterAlias(unittest.TestCase):
self.operation_model.input_shape = request_shape
self.parameter_alias.alias_parameter_in_call(
params, self.operation_model)
- self.assertEqual(params, {self.alias_name: value})
+ assert params == {self.alias_name: value}
def test_alias_parameter_in_documentation_request_params(self):
RequestParamsDocumenter(
@@ -1305,10 +1276,10 @@ class TestParameterAlias(unittest.TestCase):
self.sample_section
)
contents = self.sample_section.flush_structure().decode('utf-8')
- self.assertIn(':type ' + self.alias_name + ':', contents)
- self.assertIn(':param ' + self.alias_name + ':', contents)
- self.assertNotIn(':type ' + self.original_name + ':', contents)
- self.assertNotIn(':param ' + self.original_name + ':', contents)
+ assert ':type ' + self.alias_name + ':' in contents
+ assert ':param ' + self.alias_name + ':' in contents
+ assert ':type ' + self.original_name + ':' not in contents
+ assert ':param ' + self.original_name + ':' not in contents
def test_alias_parameter_in_documentation_request_example(self):
RequestExampleDocumenter(
@@ -1319,8 +1290,8 @@ class TestParameterAlias(unittest.TestCase):
self.sample_section
)
contents = self.sample_section.flush_structure().decode('utf-8')
- self.assertIn(self.alias_name + '=', contents)
- self.assertNotIn(self.original_name + '=', contents)
+ assert self.alias_name + '=' in contents
+ assert self.original_name + '=' not in contents
class TestCommandAlias(unittest.TestCase):
@@ -1330,7 +1301,7 @@ class TestCommandAlias(unittest.TestCase):
client.foo.return_value = 'bar'
response = alias(client=client)()
- self.assertEqual(response, 'bar')
+ assert response == 'bar'
class TestPrependToHost(unittest.TestCase):
@@ -1349,28 +1320,28 @@ class TestPrependToHost(unittest.TestCase):
def test_does_prepend_to_host(self):
prepended = self._prepend_to_host('https://bar.example.com/', 'foo')
- self.assertEqual(prepended, 'https://foo.bar.example.com/')
+ assert prepended == 'https://foo.bar.example.com/'
def test_does_prepend_to_host_with_http(self):
prepended = self._prepend_to_host('http://bar.example.com/', 'foo')
- self.assertEqual(prepended, 'http://foo.bar.example.com/')
+ assert prepended == 'http://foo.bar.example.com/'
def test_does_prepend_to_host_with_path(self):
prepended = self._prepend_to_host(
'https://bar.example.com/path', 'foo')
- self.assertEqual(prepended, 'https://foo.bar.example.com/path')
+ assert prepended == 'https://foo.bar.example.com/path'
def test_does_prepend_to_host_with_more_components(self):
prepended = self._prepend_to_host(
'https://bar.baz.example.com/path', 'foo')
- self.assertEqual(prepended, 'https://foo.bar.baz.example.com/path')
+ assert prepended == 'https://foo.bar.baz.example.com/path'
def test_does_validate_long_host(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self._prepend_to_host(
'https://example.com/path', 'toolong'*100)
def test_does_validate_host_with_illegal_char(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self._prepend_to_host(
'https://example.com/path', 'host#name')
diff --git a/tests/unit/test_history.py b/tests/unit/test_history.py
index d1113956..439cc3c9 100644
--- a/tests/unit/test_history.py
+++ b/tests/unit/test_history.py
@@ -82,9 +82,9 @@ class TestHistoryRecorder(unittest.TestCase):
class TestGetHistoryRecorder(unittest.TestCase):
def test_can_get_history_recorder(self):
recorder = get_global_history_recorder()
- self.assertTrue(isinstance(recorder, HistoryRecorder))
+ assert isinstance(recorder, HistoryRecorder)
def test_does_reuse_history_recorder(self):
recorder_1 = get_global_history_recorder()
recorder_2 = get_global_history_recorder()
- self.assertIs(recorder_1, recorder_2)
+ assert recorder_1 is recorder_2
diff --git a/tests/unit/test_hooks.py b/tests/unit/test_hooks.py
index 885686c2..9c9c4243 100644
--- a/tests/unit/test_hooks.py
+++ b/tests/unit/test_hooks.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import copy
import functools
+import pytest
from tests import unittest
from functools import partial
@@ -31,22 +32,22 @@ class TestHierarchicalEventEmitter(unittest.TestCase):
def test_non_dot_behavior(self):
self.emitter.register('no-dot', self.hook)
self.emitter.emit('no-dot')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
def test_with_dots(self):
self.emitter.register('foo.bar.baz', self.hook)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
def test_catch_all_hook(self):
self.emitter.register('foo', self.hook)
self.emitter.register('foo.bar', self.hook)
self.emitter.register('foo.bar.baz', self.hook)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 3, self.hook_calls)
+ assert len(self.hook_calls) == 3, self.hook_calls
# The hook is called with the same event name three times.
- self.assertEqual([e['event_name'] for e in self.hook_calls],
- ['foo.bar.baz', 'foo.bar.baz', 'foo.bar.baz'])
+ assert [e['event_name'] for e in self.hook_calls] == [
+ 'foo.bar.baz', 'foo.bar.baz', 'foo.bar.baz']
def test_hook_called_in_proper_order(self):
# We should call the hooks from most specific to least
@@ -59,7 +60,7 @@ class TestHierarchicalEventEmitter(unittest.TestCase):
lambda **kwargs: calls.append('foo.bar.baz'))
self.emitter.emit('foo.bar.baz')
- self.assertEqual(calls, ['foo.bar.baz', 'foo.bar', 'foo'])
+ assert calls == ['foo.bar.baz', 'foo.bar', 'foo']
class TestAliasedEmitter(unittest.TestCase):
@@ -79,7 +80,7 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bear.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
def test_aliased_event_emitted(self):
aliases = {'bar': 'bear'}
@@ -87,7 +88,7 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bear.baz', self.hook)
emitter.emit('foo.bar.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
def test_alias_with_dots_emitted(self):
aliases = {'api.bar': 'bear'}
@@ -95,7 +96,7 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bear.baz', self.hook)
emitter.emit('foo.api.bar.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
def test_aliased_event_registered(self):
aliases = {'bar': 'bear'}
@@ -103,7 +104,7 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
def test_aliased_event_with_dots_registered(self):
aliases = {'api.bar': 'bear'}
@@ -111,7 +112,7 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.api.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
def test_event_unregistered(self):
aliases = {'bar': 'bear'}
@@ -120,13 +121,13 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
self.hook_calls = []
emitter.unregister('foo.bear.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, [])
+ assert calls == []
def test_aliased_event_unregistered(self):
aliases = {'bar': 'bear'}
@@ -135,13 +136,13 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
self.hook_calls = []
emitter.unregister('foo.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, [])
+ assert calls == []
def test_aliased_event_with_dots_unregistered(self):
aliases = {'api.bar': 'bear'}
@@ -150,13 +151,13 @@ class TestAliasedEmitter(unittest.TestCase):
emitter.register('foo.api.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, ['foo.bear.baz'])
+ assert calls == ['foo.bear.baz']
self.hook_calls = []
emitter.unregister('foo.api.bar.baz', self.hook)
emitter.emit('foo.bear.baz')
calls = [e['event_name'] for e in self.hook_calls]
- self.assertEqual(calls, [])
+ assert calls == []
class TestStopProcessing(unittest.TestCase):
@@ -185,7 +186,7 @@ class TestStopProcessing(unittest.TestCase):
self.emitter.register('foo', self.hook3)
self.emitter.emit('foo')
- self.assertEqual(self.hook_calls, ['hook1', 'hook2', 'hook3'])
+ assert self.hook_calls == ['hook1', 'hook2', 'hook3']
def test_stop_processing_after_first_response(self):
# Here we register three hooks, but only the first
@@ -195,8 +196,8 @@ class TestStopProcessing(unittest.TestCase):
self.emitter.register('foo', self.hook3)
handler, response = self.emitter.emit_until_response('foo')
- self.assertEqual(response, 'hook2-response')
- self.assertEqual(self.hook_calls, ['hook1', 'hook2'])
+ assert response == 'hook2-response'
+ assert self.hook_calls == ['hook1', 'hook2']
def test_no_responses(self):
# Here we register a handler that will not return a response
@@ -204,21 +205,21 @@ class TestStopProcessing(unittest.TestCase):
self.emitter.register('foo', self.hook1)
responses = self.emitter.emit('foo')
- self.assertEqual(self.hook_calls, ['hook1'])
- self.assertEqual(responses, [(self.hook1, None)])
+ assert self.hook_calls == ['hook1']
+ assert responses == [(self.hook1, None)]
def test_no_handlers(self):
# Here we have no handlers, but still expect a tuple of return
# values.
handler, response = self.emitter.emit_until_response('foo')
- self.assertIsNone(handler)
- self.assertIsNone(response)
+ assert handler is None
+ assert response is None
class TestFirstNonNoneResponse(unittest.TestCase):
def test_all_none(self):
- self.assertIsNone(first_non_none_response([]))
+ assert first_non_none_response([]) is None
def test_first_non_none(self):
correct_value = 'correct_value'
@@ -227,14 +228,13 @@ class TestFirstNonNoneResponse(unittest.TestCase):
# and we don't care about the handler so we just use a value of
# None.
responses = [(None, None), (None, correct_value), (None, wrong_value)]
- self.assertEqual(first_non_none_response(responses), correct_value)
+ assert first_non_none_response(responses) == correct_value
def test_default_value_if_non_none_found(self):
responses = [(None, None), (None, None)]
# If no response is found and a default value is passed in, it will
# be returned.
- self.assertEqual(
- first_non_none_response(responses, default='notfound'), 'notfound')
+ assert first_non_none_response(responses, default='notfound') == 'notfound'
class TestWildcardHandlers(unittest.TestCase):
@@ -256,7 +256,7 @@ class TestWildcardHandlers(unittest.TestCase):
after = len(self.hook_calls)
if not after > starting:
self.fail("Handler was not called for event: %s" % event)
- self.assertEqual(self.hook_calls[-1]['event_name'], event)
+ assert self.hook_calls[-1]['event_name'] == event
def assert_hook_is_not_called_given_event(self, event):
starting = len(self.hook_calls)
@@ -371,7 +371,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo.bar.baz', self.hook)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 2)
+ assert len(self.hook_calls) == 2
def test_register_with_unique_id(self):
self.emitter.register('foo.bar.baz', self.hook, unique_id='foo')
@@ -382,33 +382,33 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo.other', self.hook, unique_id='foo')
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
self.hook_calls = []
self.emitter.emit('foo.other')
- self.assertEqual(len(self.hook_calls), 0)
+ assert len(self.hook_calls) == 0
def test_remove_handler_with_unique_id(self):
hook2 = lambda **kwargs: self.hook_calls.append(kwargs)
self.emitter.register('foo.bar.baz', self.hook, unique_id='foo')
self.emitter.register('foo.bar.baz', hook2)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 2)
+ assert len(self.hook_calls) == 2
# Reset the hook calls.
self.hook_calls = []
self.emitter.unregister('foo.bar.baz', hook2)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
self.hook_calls = []
# Can provide the unique_id to unregister.
self.emitter.unregister('foo.bar.baz', unique_id='foo')
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 0)
+ assert len(self.hook_calls) == 0
# Same as with not specifying a unique_id, you can call
# unregister multiple times and not get an exception.
@@ -420,25 +420,25 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.unregister('foo.bar.baz', self.hook)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
self.hook_calls = []
self.emitter.unregister('foo.bar.baz', self.hook)
self.emitter.emit('foo.bar.baz')
- self.assertEqual(len(self.hook_calls), 0)
+ assert len(self.hook_calls) == 0
def test_register_with_uses_count_initially(self):
self.emitter.register('foo', self.hook, unique_id='foo',
unique_id_uses_count=True)
# Subsequent calls must set ``unique_id_uses_count`` to True.
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.emitter.register('foo', self.hook, unique_id='foo')
def test_register_with_uses_count_not_initially(self):
self.emitter.register('foo', self.hook, unique_id='foo')
# Subsequent calls must set ``unique_id_uses_count`` to False.
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.emitter.register('foo', self.hook, unique_id='foo',
unique_id_uses_count=True)
@@ -449,28 +449,28 @@ class TestWildcardHandlers(unittest.TestCase):
unique_id_uses_count=True)
# Event was registered to use a count so it must be specified
# that a count is used when unregistering
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.emitter.unregister('foo', self.hook, unique_id='foo')
# Event should not have been unregistered.
self.emitter.emit('foo')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
self.emitter.unregister('foo', self.hook, unique_id='foo',
unique_id_uses_count=True)
# Event still should not be unregistered.
self.hook_calls = []
self.emitter.emit('foo')
- self.assertEqual(len(self.hook_calls), 1)
+ assert len(self.hook_calls) == 1
self.emitter.unregister('foo', self.hook, unique_id='foo',
unique_id_uses_count=True)
# Now the event should be unregistered.
self.hook_calls = []
self.emitter.emit('foo')
- self.assertEqual(len(self.hook_calls), 0)
+ assert len(self.hook_calls) == 0
def test_register_with_no_uses_count_unregister(self):
self.emitter.register('foo', self.hook, unique_id='foo')
# The event was not registered to use a count initially
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.emitter.unregister('foo', self.hook, unique_id='foo',
unique_id_uses_count=True)
@@ -482,8 +482,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo', partial(handler, call_number=1))
self.emitter.register('foo', partial(handler, call_number=2))
self.emitter.emit('foo')
- self.assertEqual([k['call_number'] for k in self.hook_calls],
- [1, 2])
+ assert [k['call_number'] for k in self.hook_calls] == [1, 2]
def test_handler_call_order_with_hierarchy(self):
def handler(call_number, **kwargs):
@@ -501,8 +500,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo', partial(handler, call_number=6))
self.emitter.emit('foo.bar.baz')
- self.assertEqual([k['call_number'] for k in self.hook_calls],
- [1, 2, 3, 4, 5, 6])
+ assert [k['call_number'] for k in self.hook_calls] == [1, 2, 3, 4, 5, 6]
def test_register_first_single_level(self):
def handler(call_number, **kwargs):
@@ -518,8 +516,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo', partial(handler, call_number=5))
self.emitter.emit('foo')
- self.assertEqual([k['call_number'] for k in self.hook_calls],
- [1, 2, 3, 4, 5])
+ assert [k['call_number'] for k in self.hook_calls] == [1, 2, 3, 4, 5]
def test_register_first_hierarchy(self):
def handler(call_number, **kwargs):
@@ -536,8 +533,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo.bar', partial(handler, call_number=3))
self.emitter.emit('foo.bar')
- self.assertEqual([k['call_number'] for k in self.hook_calls],
- [1, 2, 3, 4, 5, 6])
+ assert [k['call_number'] for k in self.hook_calls] == [1, 2, 3, 4, 5, 6]
def test_register_last_hierarchy(self):
def handler(call_number, **kwargs):
@@ -548,8 +544,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo', partial(handler, call_number=2))
self.emitter.register_first('foo', partial(handler, call_number=1))
self.emitter.emit('foo')
- self.assertEqual([k['call_number'] for k in self.hook_calls],
- [1, 2, 3])
+ assert [k['call_number'] for k in self.hook_calls] == [1, 2, 3]
def test_register_unregister_first_last(self):
self.emitter.register('foo', self.hook)
@@ -561,7 +556,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.unregister('foo', self.hook)
self.emitter.emit('foo')
- self.assertEqual(self.hook_calls, [])
+ assert self.hook_calls == []
def test_copy_emitter(self):
# Here we're not testing copy directly, we're testing
@@ -577,24 +572,24 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo.bar.baz', first_handler)
# First time we emit, only the first handler should be called.
self.emitter.emit('foo.bar.baz', id_name='first-time')
- self.assertEqual(first, ['first-time'])
- self.assertEqual(second, [])
+ assert first == ['first-time']
+ assert second == []
copied_emitter = copy.copy(self.emitter)
# If we emit from the copied emitter, we should still
# only see the first handler called.
copied_emitter.emit('foo.bar.baz', id_name='second-time')
- self.assertEqual(first, ['first-time', 'second-time'])
- self.assertEqual(second, [])
+ assert first == ['first-time', 'second-time']
+ assert second == []
# However, if we register an event handler with the copied
# emitter, the first emitter will not see this.
copied_emitter.register('foo.bar.baz', second_handler)
copied_emitter.emit('foo.bar.baz', id_name='third-time')
- self.assertEqual(first, ['first-time', 'second-time', 'third-time'])
+ assert first == ['first-time', 'second-time', 'third-time']
# And now the second handler is called.
- self.assertEqual(second, ['third-time'])
+ assert second == ['third-time']
# And vice-versa, emitting from the original emitter
# will not trigger the second_handler.
@@ -603,7 +598,7 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.unregister('foo.bar.baz', first_handler)
self.emitter.register('foo.bar.baz', first_handler)
self.emitter.emit('foo.bar.baz', id_name='last-time')
- self.assertEqual(second, ['third-time'])
+ assert second == ['third-time']
def test_copy_emitter_with_unique_id_event(self):
# Here we're not testing copy directly, we're testing
@@ -618,8 +613,8 @@ class TestWildcardHandlers(unittest.TestCase):
self.emitter.register('foo', first_handler, 'bar')
self.emitter.emit('foo', id_name='first-time')
- self.assertEqual(first, ['first-time'])
- self.assertEqual(second, [])
+ assert first == ['first-time']
+ assert second == []
copied_emitter = copy.copy(self.emitter)
@@ -628,22 +623,22 @@ class TestWildcardHandlers(unittest.TestCase):
# because the unique id was already used.
copied_emitter.register('foo', second_handler, 'bar')
copied_emitter.emit('foo', id_name='second-time')
- self.assertEqual(first, ['first-time', 'second-time'])
- self.assertEqual(second, [])
+ assert first == ['first-time', 'second-time']
+ assert second == []
# If we unregister the first event from the copied emitter,
# We should be able to register the second handler.
copied_emitter.unregister('foo', first_handler, 'bar')
copied_emitter.register('foo', second_handler, 'bar')
copied_emitter.emit('foo', id_name='third-time')
- self.assertEqual(first, ['first-time', 'second-time'])
- self.assertEqual(second, ['third-time'])
+ assert first == ['first-time', 'second-time']
+ assert second == ['third-time']
# The original event emitter should have the unique id event still
# registered though.
self.emitter.emit('foo', id_name='fourth-time')
- self.assertEqual(first, ['first-time', 'second-time', 'fourth-time'])
- self.assertEqual(second, ['third-time'])
+ assert first == ['first-time', 'second-time', 'fourth-time']
+ assert second == ['third-time']
def test_copy_events_with_partials(self):
# There's a bug in python2.6 where you can't deepcopy
@@ -655,8 +650,8 @@ class TestWildcardHandlers(unittest.TestCase):
f = functools.partial(handler, 1)
self.emitter.register('a.b', f)
copied = copy.copy(self.emitter)
- self.assertEqual(copied.emit_until_response(
- 'a.b', b='return-val')[1], 'return-val')
+ assert copied.emit_until_response(
+ 'a.b', b='return-val')[1] == 'return-val'
if __name__ == '__main__':
diff --git a/tests/unit/test_http_client_exception_mapping.py b/tests/unit/test_http_client_exception_mapping.py
index 2bcffac9..a401610c 100644
--- a/tests/unit/test_http_client_exception_mapping.py
+++ b/tests/unit/test_http_client_exception_mapping.py
@@ -1,4 +1,5 @@
import unittest
+import pytest
from botocore import exceptions as botocore_exceptions
from botocore.vendored.requests import exceptions as requests_exceptions
@@ -13,9 +14,9 @@ EXCEPTION_MAPPING = [
]
-class TestHttpClientExceptionMapping(unittest.TestCase):
- def test_http_client_exception_mapping(self):
- for new_exception, old_exception in EXCEPTION_MAPPING:
- with self.assertRaises(old_exception):
- raise new_exception(endpoint_url=None, proxy_url=None,
- error=None)
+class TestHttpClientExceptionMapping(object):
+ @pytest.mark.parametrize("new_exception, old_exception", EXCEPTION_MAPPING)
+ def test_http_client_exception_mapping(self, new_exception, old_exception):
+ with pytest.raises(old_exception):
+ raise new_exception(endpoint_url=None, proxy_url=None,
+ error=None)
diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py
index c11eb701..1bde99ab 100644
--- a/tests/unit/test_http_session.py
+++ b/tests/unit/test_http_session.py
@@ -1,4 +1,5 @@
import socket
+import pytest
from tests import mock
from tests import unittest
@@ -27,39 +28,39 @@ class TestProxyConfiguration(unittest.TestCase):
def test_construct_proxy_headers_with_auth(self):
headers = self.proxy_config.proxy_headers_for(self.auth_url)
proxy_auth = headers.get('Proxy-Authorization')
- self.assertEqual('Basic dXNlcjpwYXNz', proxy_auth)
+ assert proxy_auth == 'Basic dXNlcjpwYXNz'
def test_construct_proxy_headers_without_auth(self):
headers = self.proxy_config.proxy_headers_for(self.url)
- self.assertEqual({}, headers)
+ assert headers == {}
def test_proxy_for_url_no_slashes(self):
self.update_http_proxy('localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
- self.assertEqual('http://localhost:8081/', proxy_url)
+ assert proxy_url == 'http://localhost:8081/'
def test_proxy_for_url_no_protocol(self):
self.update_http_proxy('//localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
- self.assertEqual('http://localhost:8081/', proxy_url)
+ assert proxy_url == 'http://localhost:8081/'
def test_fix_proxy_url_has_protocol_http(self):
proxy_url = self.proxy_config.proxy_url_for(self.url)
- self.assertEqual('http://localhost:8081/', proxy_url)
+ assert proxy_url == 'http://localhost:8081/'
class TestHttpSessionUtils(unittest.TestCase):
def test_get_cert_path_path(self):
path = '/some/path'
cert_path = get_cert_path(path)
- self.assertEqual(path, cert_path)
+ assert path == cert_path
def test_get_cert_path_certifi_or_default(self):
with mock.patch('botocore.httpsession.where') as where:
path = '/bundle/path'
where.return_value = path
cert_path = get_cert_path(True)
- self.assertEqual(path, cert_path)
+ assert path == cert_path
class TestURLLib3Session(unittest.TestCase):
@@ -202,7 +203,7 @@ class TestURLLib3Session(unittest.TestCase):
self.assert_proxy_manager_call(proxies['https'], proxy_headers={})
session.send(self.request.prepare())
# assert that we did not create another proxy manager
- self.assertEqual(self.proxy_manager_fun.call_count, 1)
+ assert self.proxy_manager_fun.call_count == 1
def test_basic_http_proxy_request(self):
proxies = {'http': 'http://proxy.com'}
@@ -215,14 +216,14 @@ class TestURLLib3Session(unittest.TestCase):
session = URLLib3Session()
session.send(self.request.prepare())
_, manager_kwargs = self.pool_manager_cls.call_args
- self.assertIsNotNone(manager_kwargs.get('ssl_context'))
+ assert manager_kwargs.get('ssl_context') is not None
def test_proxy_request_ssl_context_is_explicit(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
session.send(self.request.prepare())
_, proxy_kwargs = self.proxy_manager_fun.call_args
- self.assertIsNotNone(proxy_kwargs.get('ssl_context'))
+ assert proxy_kwargs.get('ssl_context') is not None
def test_session_forwards_socket_options_to_pool_manager(self):
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
@@ -249,12 +250,12 @@ class TestURLLib3Session(unittest.TestCase):
session.send(self.request.prepare())
def test_catches_new_connection_error(self):
- with self.assertRaises(EndpointConnectionError):
+ with pytest.raises(EndpointConnectionError):
error = NewConnectionError(None, None)
self.make_request_with_error(error)
def test_catches_bad_status_line(self):
- with self.assertRaises(ConnectionClosedError):
+ with pytest.raises(ConnectionClosedError):
error = ProtocolError(None)
self.make_request_with_error(error)
@@ -262,9 +263,9 @@ class TestURLLib3Session(unittest.TestCase):
session = URLLib3Session()
# ensure the pool manager is using the correct classes
http_class = self.pool_manager.pool_classes_by_scheme.get('http')
- self.assertIs(http_class, AWSHTTPConnectionPool)
+ assert http_class is AWSHTTPConnectionPool
https_class = self.pool_manager.pool_classes_by_scheme.get('https')
- self.assertIs(https_class, AWSHTTPSConnectionPool)
+ assert https_class is AWSHTTPSConnectionPool
def test_chunked_encoding_is_set_with_header(self):
session = URLLib3Session()
diff --git a/tests/unit/test_idempotency.py b/tests/unit/test_idempotency.py
index a679afd9..9c0f010e 100644
--- a/tests/unit/test_idempotency.py
+++ b/tests/unit/test_idempotency.py
@@ -29,11 +29,11 @@ class TestIdempotencyInjection(unittest.TestCase):
# No parameters are provided, RequiredKey should be autofilled
params = {}
generate_idempotent_uuid(params, self.mock_model)
- self.assertIn('RequiredKey', params)
- self.assertIsNotNone(self.uuid_pattern.match(params['RequiredKey']))
+ assert 'RequiredKey' in params
+ assert self.uuid_pattern.match(params['RequiredKey']) is not None
def test_provided(self):
# RequiredKey is provided, should not be replaced
params = {'RequiredKey': 'already populated'}
generate_idempotent_uuid(params, self.mock_model)
- self.assertEqual(params['RequiredKey'], 'already populated')
+ assert params['RequiredKey'] == 'already populated'
diff --git a/tests/unit/test_loaders.py b/tests/unit/test_loaders.py
index 7e7906a0..dc396f18 100644
--- a/tests/unit/test_loaders.py
+++ b/tests/unit/test_loaders.py
@@ -22,6 +22,7 @@
import os
import contextlib
import copy
+import pytest
from tests import mock
from botocore.exceptions import DataNotFoundError, UnknownServiceError
@@ -42,25 +43,25 @@ class TestJSONFileLoader(BaseEnvVar):
def test_load_file(self):
data = self.file_loader.load_file(self.valid_file_path)
- self.assertEqual(len(data), 3)
- self.assertTrue('test_key_1' in data)
+ assert len(data) == 3
+ assert 'test_key_1' in data
def test_load_json_file_does_not_exist_returns_none(self):
# None is used to indicate that the loader could not find a
# file to load.
- self.assertIsNone(self.file_loader.load_file('fooasdfasdfasdf'))
+ assert self.file_loader.load_file('fooasdfasdfasdf') is None
def test_file_exists_check(self):
- self.assertTrue(self.file_loader.exists(self.valid_file_path))
+ assert self.file_loader.exists(self.valid_file_path)
def test_file_does_not_exist_returns_false(self):
- self.assertFalse(self.file_loader.exists(
- os.path.join(self.data_path, 'does', 'not', 'exist')))
+ assert not self.file_loader.exists(
+ os.path.join(self.data_path, 'does', 'not', 'exist'))
def test_file_with_non_ascii(self):
try:
filename = os.path.join(self.data_path, 'non_ascii')
- self.assertTrue(self.file_loader.load_file(filename) is not None)
+ assert self.file_loader.load_file(filename) is not None
except UnicodeDecodeError:
self.fail('Fail to handle data file with non-ascii characters')
@@ -69,29 +70,26 @@ class TestLoader(BaseEnvVar):
def test_default_search_paths(self):
loader = Loader()
- self.assertEqual(len(loader.search_paths), 2)
+ assert len(loader.search_paths) == 2
# We should also have ~/.aws/models added to
# the search path. To deal with cross platform
# issues we'll just check for a path that ends
# with .aws/models.
home_dir_path = os.path.join('.aws', 'models')
- self.assertTrue(
- any(p.endswith(home_dir_path) for p in
- loader.search_paths))
+ assert any(p.endswith(home_dir_path) for p in
+ loader.search_paths)
def test_can_add_to_search_path(self):
loader = Loader()
loader.search_paths.append('mypath')
- self.assertIn('mypath', loader.search_paths)
+ assert 'mypath' in loader.search_paths
def test_can_initialize_with_search_paths(self):
loader = Loader(extra_search_paths=['foo', 'bar'])
# Note that the extra search paths are before
# the customer/builtin data paths.
- self.assertEqual(
- loader.search_paths,
- ['foo', 'bar', loader.CUSTOMER_DATA_PATH,
- loader.BUILTIN_DATA_PATH])
+ assert loader.search_paths == ['foo', 'bar',
+ loader.CUSTOMER_DATA_PATH, loader.BUILTIN_DATA_PATH]
# The file loader isn't consulted unless the current
# search path exists, so we're patching isdir to always
@@ -109,7 +107,7 @@ class TestLoader(BaseEnvVar):
loader = Loader(extra_search_paths=search_paths,
file_loader=FakeLoader())
loaded = loader.load_data('baz')
- self.assertEqual(loaded, ['loaded data'])
+ assert loaded == ['loaded data']
def test_data_not_found_raises_exception(self):
class FakeLoader(object):
@@ -118,14 +116,14 @@ class TestLoader(BaseEnvVar):
# loader couldn't find anything.
return None
loader = Loader(file_loader=FakeLoader())
- with self.assertRaises(DataNotFoundError):
+ with pytest.raises(DataNotFoundError):
loader.load_data('baz')
@mock.patch('os.path.isdir', mock.Mock(return_value=True))
def test_error_raised_if_service_does_not_exist(self):
loader = Loader(extra_search_paths=[],
include_default_search_paths=False)
- with self.assertRaises(DataNotFoundError):
+ with pytest.raises(DataNotFoundError):
loader.determine_latest_version('unknownservice', 'service-2')
@mock.patch('os.path.isdir', mock.Mock(return_value=True))
@@ -141,7 +139,7 @@ class TestLoader(BaseEnvVar):
loader.determine_latest_version = mock.Mock(return_value='2015-03-01')
loader.list_available_services = mock.Mock(return_value=['baz'])
loaded = loader.load_service_model('baz', type_name='service-2')
- self.assertEqual(loaded, ['loaded data'])
+ assert loaded == ['loaded data']
@mock.patch('os.path.isdir', mock.Mock(return_value=True))
def test_load_service_model_enforces_case(self):
@@ -157,8 +155,7 @@ class TestLoader(BaseEnvVar):
# Should have a) the unknown service name and b) list of valid
# service names.
- with six.assertRaisesRegex(self, UnknownServiceError,
- 'Unknown service.*BAZ.*baz'):
+ with pytest.raises(UnknownServiceError, match='Unknown service.*BAZ.*baz'):
loader.load_service_model('BAZ', type_name='service-2')
def test_load_service_model_uses_provided_type_name(self):
@@ -170,8 +167,7 @@ class TestLoader(BaseEnvVar):
# Should have a) the unknown service name and b) list of valid
# service names.
provided_type_name = 'not-service-2'
- with six.assertRaisesRegex(self, UnknownServiceError,
- 'Unknown service.*BAZ.*baz'):
+ with pytest.raises(UnknownServiceError, match='Unknown service.*BAZ.*baz'):
loader.load_service_model(
'BAZ', type_name=provided_type_name)
@@ -180,9 +176,9 @@ class TestLoader(BaseEnvVar):
def test_create_loader_parses_data_path(self):
search_path = os.pathsep.join(['foo', 'bar', 'baz'])
loader = create_loader(search_path)
- self.assertIn('foo', loader.search_paths)
- self.assertIn('bar', loader.search_paths)
- self.assertIn('baz', loader.search_paths)
+ assert 'foo' in loader.search_paths
+ assert 'bar' in loader.search_paths
+ assert 'baz' in loader.search_paths
class TestMergeExtras(BaseEnvVar):
@@ -212,7 +208,7 @@ class TestMergeExtras(BaseEnvVar):
loaded = self.data_loader.load_service_model('myservice', 'service-2')
expected = {'foo': 'sdk', 'bar': 'service'}
- self.assertEqual(loaded, expected)
+ assert loaded == expected
call_args = self.file_loader.load_file.call_args_list
call_args = [c[0][0] for c in call_args]
@@ -221,7 +217,7 @@ class TestMergeExtras(BaseEnvVar):
os.path.join(base_path, 'service-2'),
os.path.join(base_path, 'service-2.sdk-extras')
]
- self.assertEqual(call_args, expected_call_args)
+ assert call_args == expected_call_args
def test_extras_not_found(self):
service_data = {'foo': 'service', 'bar': 'service'}
@@ -229,7 +225,7 @@ class TestMergeExtras(BaseEnvVar):
self.file_loader.load_file.side_effect = [service_data, None]
loaded = self.data_loader.load_service_model('myservice', 'service-2')
- self.assertEqual(loaded, service_data_copy)
+ assert loaded == service_data_copy
def test_no_merge_in_extras(self):
service_data = {'foo': 'service', 'bar': 'service'}
@@ -237,7 +233,7 @@ class TestMergeExtras(BaseEnvVar):
self.file_loader.load_file.side_effect = [service_data, {}]
loaded = self.data_loader.load_service_model('myservice', 'service-2')
- self.assertEqual(loaded, service_data_copy)
+ assert loaded == service_data_copy
def test_include_default_extras(self):
self.data_loader = Loader(
@@ -255,7 +251,7 @@ class TestMergeExtras(BaseEnvVar):
self.file_loader.load_file.side_effect = [service_data, sdk_extras]
loaded = self.data_loader.load_service_model('myservice', 'service-2')
- self.assertEqual(loaded, service_data_copy)
+ assert loaded == service_data_copy
def test_append_extra_type(self):
service_data = {'foo': 'service', 'bar': 'service'}
@@ -268,7 +264,7 @@ class TestMergeExtras(BaseEnvVar):
loaded = self.data_loader.load_service_model('myservice', 'service-2')
expected = {'foo': 'sdk', 'bar': 'service', 'cli': True}
- self.assertEqual(loaded, expected)
+ assert loaded == expected
call_args = self.file_loader.load_file.call_args_list
call_args = [c[0][0] for c in call_args]
@@ -278,7 +274,7 @@ class TestMergeExtras(BaseEnvVar):
os.path.join(base_path, 'service-2.sdk-extras'),
os.path.join(base_path, 'service-2.cli-extras')
]
- self.assertEqual(call_args, expected_call_args)
+ assert call_args == expected_call_args
def test_sdk_empty_extras_skipped(self):
service_data = {'foo': 'service', 'bar': 'service'}
@@ -290,7 +286,7 @@ class TestMergeExtras(BaseEnvVar):
loaded = self.data_loader.load_service_model('myservice', 'service-2')
expected = {'foo': 'cli', 'bar': 'service'}
- self.assertEqual(loaded, expected)
+ assert loaded == expected
class TestExtrasProcessor(BaseEnvVar):
@@ -306,19 +302,19 @@ class TestExtrasProcessor(BaseEnvVar):
def test_process_empty_list(self):
self.processor.process(self.service_data, [])
- self.assertEqual(self.service_data, self.service_data_copy)
+ assert self.service_data == self.service_data_copy
def test_process_empty_extras(self):
self.processor.process(self.service_data, [{}])
- self.assertEqual(self.service_data, self.service_data_copy)
+ assert self.service_data == self.service_data_copy
def test_process_merge_key(self):
extras = {'merge': {'shapes': {'BooleanShape': {'type': 'boolean'}}}}
self.processor.process(self.service_data, [extras])
- self.assertNotEqual(self.service_data, self.service_data_copy)
+ assert self.service_data != self.service_data_copy
boolean_shape = self.service_data['shapes'].get('BooleanShape')
- self.assertEqual(boolean_shape, {'type': 'boolean'})
+ assert boolean_shape == {'type': 'boolean'}
def test_process_in_order(self):
extras = [
@@ -326,10 +322,10 @@ class TestExtrasProcessor(BaseEnvVar):
{'merge': {'shapes': {'BooleanShape': {'type': 'string'}}}}
]
self.processor.process(self.service_data, extras)
- self.assertNotEqual(self.service_data, self.service_data_copy)
+ assert self.service_data != self.service_data_copy
boolean_shape = self.service_data['shapes'].get('BooleanShape')
- self.assertEqual(boolean_shape, {'type': 'string'})
+ assert boolean_shape == {'type': 'string'}
class TestLoadersWithDirectorySearching(BaseEnvVar):
@@ -394,12 +390,9 @@ class TestLoadersWithDirectorySearching(BaseEnvVar):
},
}
with self.loader_with_fake_dirs() as loader:
- self.assertEqual(
- loader.list_available_services(type_name='service-2'),
- ['dynamodb', 'ec2'])
- self.assertEqual(
- loader.list_available_services(type_name='resource-1'),
- ['rds'])
+ assert loader.list_available_services(type_name='service-2') == [
+ 'dynamodb', 'ec2']
+ assert loader.list_available_services(type_name='resource-1') == ['rds']
def test_determine_latest(self):
# Fake mapping of directories to subdirectories.
@@ -428,7 +421,5 @@ class TestLoadersWithDirectorySearching(BaseEnvVar):
}
with self.loader_with_fake_dirs() as loader:
latest = loader.determine_latest_version('ec2', 'service-2')
- self.assertEqual(loader.determine_latest_version('ec2', 'service-2'),
- '2014-10-01')
- self.assertEqual(loader.determine_latest_version('ec2', 'service-1'),
- '2015-03-01')
+ assert loader.determine_latest_version('ec2', 'service-2') == '2014-10-01'
+ assert loader.determine_latest_version('ec2', 'service-1') == '2015-03-01'
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index fe1511de..65243bfb 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -1,4 +1,5 @@
from tests import unittest
+import pytest
from botocore import model
from botocore.compat import OrderedDict
@@ -33,12 +34,10 @@ def test_missing_model_attribute_raises_exception():
class TestServiceId(unittest.TestCase):
def test_hypenize_replaces_spaces(self):
- self.assertEqual(
- model.ServiceId('my service').hyphenize(), 'my-service'
- )
+ assert model.ServiceId('my service').hyphenize() == 'my-service'
def test_hyphenize_lower_cases(self):
- self.assertEqual(model.ServiceId('MyService').hyphenize(), 'myservice')
+ assert model.ServiceId('MyService').hyphenize() == 'myservice'
class TestServiceModel(unittest.TestCase):
@@ -74,22 +73,21 @@ class TestServiceModel(unittest.TestCase):
def test_metadata_available(self):
# You should be able to access the metadata in a service description
# through the service model object.
- self.assertEqual(self.service_model.metadata.get('protocol'), 'query')
+ assert self.service_model.metadata.get('protocol') == 'query'
def test_service_name_can_be_overriden(self):
service_model = model.ServiceModel(self.model,
service_name='myservice')
- self.assertEqual(service_model.service_name, 'myservice')
+ assert service_model.service_name == 'myservice'
def test_service_name_defaults_to_endpoint_prefix(self):
- self.assertEqual(self.service_model.service_name, 'endpoint-prefix')
+ assert self.service_model.service_name == 'endpoint-prefix'
def test_service_id(self):
- self.assertEqual(self.service_model.service_id, 'MyService')
+ assert self.service_model.service_id == 'MyService'
def test_hyphenize_service_id(self):
- self.assertEqual(
- self.service_model.service_id.hyphenize(), 'myservice')
+ assert self.service_model.service_id.hyphenize() == 'myservice'
def test_service_id_does_not_exist(self):
service_model = {
@@ -105,8 +103,7 @@ class TestServiceModel(unittest.TestCase):
}
service_name = 'myservice'
service_model = model.ServiceModel(service_model, service_name)
- with six.assertRaisesRegex(self, model.UndefinedModelAttributeError,
- service_name):
+ with pytest.raises(model.UndefinedModelAttributeError, match=service_name):
service_model.service_id()
def test_operation_does_not_exist(self):
@@ -114,35 +111,33 @@ class TestServiceModel(unittest.TestCase):
self.service_model.operation_model('NoExistOperation')
def test_signing_name_defaults_to_endpoint_prefix(self):
- self.assertEqual(self.service_model.signing_name, 'endpoint-prefix')
+ assert self.service_model.signing_name == 'endpoint-prefix'
def test_documentation_exposed_as_property(self):
- self.assertEqual(self.service_model.documentation,
- 'Documentation value')
+ assert self.service_model.documentation == 'Documentation value'
def test_shape_names(self):
- self.assertEqual(self.service_model.shape_names, ['StringShape'])
+ assert self.service_model.shape_names == ['StringShape']
def test_repr_has_service_name(self):
- self.assertEqual(repr(self.service_model),
- 'ServiceModel(endpoint-prefix)')
+ assert repr(self.service_model) == 'ServiceModel(endpoint-prefix)'
def test_shape_for_error_code(self):
self.model['shapes'].update(self.error_shapes)
self.service_model = model.ServiceModel(self.model)
shape = self.service_model.shape_for_error_code('ExceptionOne')
- self.assertEqual(shape.name, 'ExceptionOne')
+ assert shape.name == 'ExceptionOne'
shape = self.service_model.shape_for_error_code('FooCode')
- self.assertEqual(shape.name, 'ExceptionTwo')
+ assert shape.name == 'ExceptionTwo'
def test_error_shapes(self):
self.model['shapes'].update(self.error_shapes)
self.service_model = model.ServiceModel(self.model)
error_shapes = self.service_model.error_shapes
error_shape_names = [shape.name for shape in error_shapes]
- self.assertEqual(len(error_shape_names), 2)
- self.assertIn('ExceptionOne', error_shape_names)
- self.assertIn('ExceptionTwo', error_shape_names)
+ assert len(error_shape_names) == 2
+ assert 'ExceptionOne' in error_shape_names
+ assert 'ExceptionTwo' in error_shape_names
class TestOperationModelFromService(unittest.TestCase):
@@ -214,8 +209,8 @@ class TestOperationModelFromService(unittest.TestCase):
service_model = model.ServiceModel(self.model)
operation = model.OperationModel(
self.model['operations']['OperationName'], service_model, 'Foo')
- self.assertEqual(operation.name, 'Foo')
- self.assertEqual(operation.wire_name, 'OperationName')
+ assert operation.name == 'Foo'
+ assert operation.wire_name == 'OperationName'
def test_operation_name_in_repr(self):
service_model = model.ServiceModel(self.model)
@@ -226,52 +221,51 @@ class TestOperationModelFromService(unittest.TestCase):
service_model = model.ServiceModel(self.model)
operation = model.OperationModel(
self.model['operations']['OperationName'], service_model)
- self.assertEqual(operation.name, 'OperationName')
- self.assertEqual(operation.wire_name, 'OperationName')
+ assert operation.name == 'OperationName'
+ assert operation.wire_name == 'OperationName'
def test_name_from_service(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertEqual(operation.name, 'OperationName')
+ assert operation.name == 'OperationName'
def test_name_from_service_model_when_differs_from_name(self):
self.model['operations']['Foo'] = \
self.model['operations']['OperationName']
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('Foo')
- self.assertEqual(operation.name, 'Foo')
+ assert operation.name == 'Foo'
def test_operation_input_model(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertEqual(operation.name, 'OperationName')
+ assert operation.name == 'OperationName'
# Operations should also have a reference to the top level metadata.
- self.assertEqual(operation.metadata['protocol'], 'query')
- self.assertEqual(operation.http['method'], 'POST')
- self.assertEqual(operation.http['requestUri'], '/')
+ assert operation.metadata['protocol'] == 'query'
+ assert operation.http['method'] == 'POST'
+ assert operation.http['requestUri'] == '/'
shape = operation.input_shape
- self.assertEqual(shape.name, 'OperationNameRequest')
- self.assertEqual(list(sorted(shape.members)), ['Arg1', 'Arg2'])
+ assert shape.name == 'OperationNameRequest'
+ assert list(sorted(shape.members)) == ['Arg1', 'Arg2']
def test_has_documentation_property(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertEqual(operation.documentation, 'Docs for OperationName')
+ assert operation.documentation == 'Docs for OperationName'
def test_service_model_available_from_operation_model(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
# This is an identity comparison because we don't implement
# __eq__, so we may need to change this in the future.
- self.assertEqual(
- operation.service_model, service_model)
+ assert operation.service_model == service_model
def test_operation_output_model(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
output = operation.output_shape
- self.assertEqual(list(output.members), ['String'])
- self.assertFalse(operation.has_streaming_output)
+ assert list(output.members) == ['String']
+ assert not operation.has_streaming_output
def test_operation_shape_not_required(self):
# It's ok if there's no output shape. We'll just get a return value of
@@ -280,84 +274,83 @@ class TestOperationModelFromService(unittest.TestCase):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
output_shape = operation.output_shape
- self.assertIsNone(output_shape)
+ assert output_shape is None
def test_error_shapes(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
# OperationName only has a NoSuchResourceException
- self.assertEqual(len(operation.error_shapes), 1)
- self.assertEqual(
- operation.error_shapes[0].name, 'NoSuchResourceException')
+ assert len(operation.error_shapes) == 1
+ assert operation.error_shapes[0].name == 'NoSuchResourceException'
def test_has_auth_type(self):
operation = self.service_model.operation_model('OperationName')
- self.assertEqual(operation.auth_type, 'v4')
+ assert operation.auth_type == 'v4'
def test_auth_type_not_set(self):
operation = self.service_model.operation_model('OperationTwo')
- self.assertIsNone(operation.auth_type)
+ assert operation.auth_type is None
def test_deprecated_present(self):
self.model['operations']['OperationName']['deprecated'] = True
service_model = model.ServiceModel(self.model)
operation_name = service_model.operation_model('OperationName')
- self.assertTrue(operation_name.deprecated)
+ assert operation_name.deprecated
def test_deprecated_present_false(self):
self.model['operations']['OperationName']['deprecated'] = False
service_model = model.ServiceModel(self.model)
operation_name = service_model.operation_model('OperationName')
- self.assertFalse(operation_name.deprecated)
+ assert not operation_name.deprecated
def test_deprecated_absent(self):
service_model = model.ServiceModel(self.model)
operation_two = service_model.operation_model('OperationTwo')
- self.assertFalse(operation_two.deprecated)
+ assert not operation_two.deprecated
def test_endpoint_operation_present(self):
self.model['operations']['OperationName']['endpointoperation'] = True
service_model = model.ServiceModel(self.model)
operation_name = service_model.operation_model('OperationName')
- self.assertTrue(operation_name.is_endpoint_discovery_operation)
+ assert operation_name.is_endpoint_discovery_operation
def test_endpoint_operation_present_false(self):
self.model['operations']['OperationName']['endpointoperation'] = False
service_model = model.ServiceModel(self.model)
operation_name = service_model.operation_model('OperationName')
- self.assertFalse(operation_name.is_endpoint_discovery_operation)
+ assert not operation_name.is_endpoint_discovery_operation
def test_endpoint_operation_absent(self):
operation_two = self.service_model.operation_model('OperationName')
- self.assertFalse(operation_two.is_endpoint_discovery_operation)
+ assert not operation_two.is_endpoint_discovery_operation
def test_endpoint_discovery_required(self):
operation = self.model['operations']['OperationName']
operation['endpointdiscovery'] = {'required': True}
service_model = model.ServiceModel(self.model)
- self.assertTrue(service_model.endpoint_discovery_required)
+ assert service_model.endpoint_discovery_required
def test_endpoint_discovery_required_false(self):
self.model['operations']['OperationName']['endpointdiscovery'] = {}
service_model = model.ServiceModel(self.model)
- self.assertFalse(service_model.endpoint_discovery_required)
+ assert not service_model.endpoint_discovery_required
def test_endpoint_discovery_required_no_value(self):
operation = self.model['operations']['OperationName']
- self.assertTrue(operation.get('endpointdiscovery') is None)
+ assert operation.get('endpointdiscovery') is None
service_model = model.ServiceModel(self.model)
- self.assertFalse(service_model.endpoint_discovery_required)
+ assert not service_model.endpoint_discovery_required
def test_endpoint_discovery_present(self):
operation = self.model['operations']['OperationName']
operation['endpointdiscovery'] = {'required': True}
service_model = model.ServiceModel(self.model)
operation_name = service_model.operation_model('OperationName')
- self.assertTrue(operation_name.endpoint_discovery.get('required'))
+ assert operation_name.endpoint_discovery.get('required')
def test_endpoint_discovery_absent(self):
operation_name = self.service_model.operation_model('OperationName')
- self.assertIsNone(operation_name.endpoint_discovery)
+ assert operation_name.endpoint_discovery is None
class TestOperationModelEventStreamTypes(unittest.TestCase):
@@ -441,38 +434,38 @@ class TestOperationModelEventStreamTypes(unittest.TestCase):
def test_event_stream_input_for_operation(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertTrue(operation.has_event_stream_input)
+ assert operation.has_event_stream_input
event_stream_input = operation.get_event_stream_input()
- self.assertEqual(event_stream_input.name, 'EventStreamStructure')
+ assert event_stream_input.name == 'EventStreamStructure'
def test_no_event_stream_input_for_operation(self):
self.update_operation(input={'shape': 'NormalStructure'})
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertFalse(operation.has_event_stream_input)
- self.assertEqual(operation.get_event_stream_input(), None)
+ assert not operation.has_event_stream_input
+ assert operation.get_event_stream_input() is None
def test_event_stream_output_for_operation(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertTrue(operation.has_event_stream_output)
+ assert operation.has_event_stream_output
output = operation.get_event_stream_output()
- self.assertEqual(output.name, 'EventStreamStructure')
+ assert output.name == 'EventStreamStructure'
def test_no_event_stream_output_for_operation(self):
self.update_operation(output={'shape': 'NormalStructure'})
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertFalse(operation.has_event_stream_output)
- self.assertEqual(operation.get_event_stream_output(), None)
+ assert not operation.has_event_stream_output
+ assert operation.get_event_stream_output() is None
def test_no_output_shape(self):
self.update_operation(output=None)
del self.model['operations']['OperationName']['output']
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertFalse(operation.has_event_stream_output)
- self.assertEqual(operation.get_event_stream_output(), None)
+ assert not operation.has_event_stream_output
+ assert operation.get_event_stream_output() is None
class TestOperationModelStreamingTypes(unittest.TestCase):
@@ -532,28 +525,28 @@ class TestOperationModelStreamingTypes(unittest.TestCase):
def test_streaming_input_for_operation(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertTrue(operation.has_streaming_input)
- self.assertEqual(operation.get_streaming_input().name, 'blobType')
+ assert operation.has_streaming_input
+ assert operation.get_streaming_input().name == 'blobType'
def test_not_streaming_input_for_operation(self):
self.remove_payload('Request')
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertFalse(operation.has_streaming_input)
- self.assertEqual(operation.get_streaming_input(), None)
+ assert not operation.has_streaming_input
+ assert operation.get_streaming_input() is None
def test_streaming_output_for_operation(self):
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertTrue(operation.has_streaming_output)
- self.assertEqual(operation.get_streaming_output().name, 'blobType')
+ assert operation.has_streaming_output
+ assert operation.get_streaming_output().name == 'blobType'
def test_not_streaming_output_for_operation(self):
self.remove_payload('Response')
service_model = model.ServiceModel(self.model)
operation = service_model.operation_model('OperationName')
- self.assertFalse(operation.has_streaming_output)
- self.assertEqual(operation.get_streaming_output(), None)
+ assert not operation.has_streaming_output
+ assert operation.get_streaming_output() is None
class TestDeepMerge(unittest.TestCase):
@@ -590,24 +583,24 @@ class TestDeepMerge(unittest.TestCase):
# map_merged has a serialization as a member trait as well as
# in the StrToStrMap.
# The member trait should have precedence.
- self.assertEqual(map_merged.serialization,
+ assert map_merged.serialization == {
# member beats the definition.
- {'name': 'Attribute',
+ 'name': 'Attribute',
# From the definition.
- 'flattened': True,})
+ 'flattened': True,}
# Ensure we don't merge/mutate the original dicts.
- self.assertEqual(map_merged.key.serialization['name'], 'Name')
- self.assertEqual(map_merged.value.serialization['name'], 'Value')
- self.assertEqual(map_merged.key.serialization['name'], 'Name')
+ assert map_merged.key.serialization['name'] == 'Name'
+ assert map_merged.value.serialization['name'] == 'Value'
+ assert map_merged.key.serialization['name'] == 'Name'
def test_merges_copy_dict(self):
shape = self.shape_resolver.get_shape_by_name('SetQueueAttributes')
map_merged = shape.members['MapExample']
- self.assertEqual(map_merged.serialization.get('name'), 'Attribute')
+ assert map_merged.serialization.get('name') == 'Attribute'
shape2 = self.shape_resolver.get_shape_by_name('SetQueueAttributes2')
map_merged2 = shape2.members['MapExample']
- self.assertEqual(map_merged2.serialization.get('name'), 'Attribute2')
+ assert map_merged2.serialization.get('name') == 'Attribute2'
class TestShapeResolver(unittest.TestCase):
@@ -626,8 +619,8 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shape_map)
shape = resolver.get_shape_by_name('Foo')
- self.assertEqual(shape.name, 'Foo')
- self.assertEqual(shape.type_name, 'structure')
+ assert shape.name == 'Foo'
+ assert shape.type_name == 'structure'
def test_resolve_shape_reference(self):
shape_map = {
@@ -644,8 +637,8 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shape_map)
shape = resolver.resolve_shape_ref({'shape': 'StringType'})
- self.assertEqual(shape.name, 'StringType')
- self.assertEqual(shape.type_name, 'string')
+ assert shape.name == 'StringType'
+ assert shape.type_name == 'string'
def test_resolve_shape_references_with_member_traits(self):
shape_map = {
@@ -663,8 +656,8 @@ class TestShapeResolver(unittest.TestCase):
resolver = model.ShapeResolver(shape_map)
shape = resolver.resolve_shape_ref({'shape': 'StringType',
'locationName': 'other'})
- self.assertEqual(shape.serialization['name'], 'other')
- self.assertEqual(shape.name, 'StringType')
+ assert shape.serialization['name'] == 'other'
+ assert shape.name == 'StringType'
def test_serialization_cache(self):
shape_map = {
@@ -681,12 +674,12 @@ class TestShapeResolver(unittest.TestCase):
resolver = model.ShapeResolver(shape_map)
shape = resolver.resolve_shape_ref({'shape': 'StringType',
'locationName': 'other'})
- self.assertEqual(shape.serialization['name'], 'other')
+ assert shape.serialization['name'] == 'other'
# serialization is computed on demand, and a cache is kept.
# This is just verifying that trying to access serialization again
# gives the same result. We don't actually care that it's cached,
# we just care that the cache doesn't mess with correctness.
- self.assertEqual(shape.serialization['name'], 'other')
+ assert shape.serialization['name'] == 'other'
def test_shape_overrides(self):
shape_map = {
@@ -697,11 +690,11 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shape_map)
shape = resolver.get_shape_by_name('StringType')
- self.assertEqual(shape.documentation, 'Original documentation')
+ assert shape.documentation == 'Original documentation'
shape = resolver.resolve_shape_ref({'shape': 'StringType',
'documentation': 'override'})
- self.assertEqual(shape.documentation, 'override')
+ assert shape.documentation == 'override'
def test_shape_type_structure(self):
shapes = {
@@ -718,13 +711,12 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('ChangePasswordRequest')
- self.assertEqual(shape.type_name, 'structure')
- self.assertEqual(shape.name, 'ChangePasswordRequest')
- self.assertEqual(list(sorted(shape.members)),
- ['NewPassword', 'OldPassword'])
- self.assertEqual(shape.members['OldPassword'].name, 'passwordType')
- self.assertEqual(shape.members['OldPassword'].type_name, 'string')
- self.assertEqual(shape.error_code, None)
+ assert shape.type_name == 'structure'
+ assert shape.name == 'ChangePasswordRequest'
+ assert list(sorted(shape.members)) == ['NewPassword', 'OldPassword']
+ assert shape.members['OldPassword'].name == 'passwordType'
+ assert shape.members['OldPassword'].type_name == 'string'
+ assert shape.error_code is None
def test_exception_error_code(self):
shapes = {
@@ -737,14 +729,14 @@ class TestShapeResolver(unittest.TestCase):
# Test without explicit error code
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('FooException')
- self.assertTrue(shape.metadata['exception'])
- self.assertEqual(shape.error_code, 'FooException')
+ assert shape.metadata['exception']
+ assert shape.error_code == 'FooException'
# Test with explicit error code
shapes['FooException']['error'] = {'code': 'ExceptionCode'}
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('FooException')
- self.assertTrue(shape.metadata['exception'])
- self.assertEqual(shape.error_code, 'ExceptionCode')
+ assert shape.metadata['exception']
+ assert shape.error_code == 'ExceptionCode'
def test_shape_metadata(self):
shapes = {
@@ -765,12 +757,11 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('ChangePasswordRequest')
- self.assertEqual(shape.metadata['required'],
- ['OldPassword', 'NewPassword'])
+ assert shape.metadata['required'] == ['OldPassword', 'NewPassword']
member = shape.members['OldPassword']
- self.assertEqual(member.metadata['min'], 1)
- self.assertEqual(member.metadata['max'], 128)
- self.assertEqual(member.metadata['sensitive'], True)
+ assert member.metadata['min'] == 1
+ assert member.metadata['max'] == 128
+ assert member.metadata['sensitive']
def test_error_shape_metadata(self):
shapes = {
@@ -787,10 +778,8 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('ResourceNotFoundException')
- self.assertEqual(
- shape.metadata,
- {'exception': True, 'retryable': {'throttling': True}}
- )
+ assert shape.metadata == {
+ 'exception': True, 'retryable': {'throttling': True}}
def test_shape_list(self):
shapes = {
@@ -810,13 +799,13 @@ class TestShapeResolver(unittest.TestCase):
}
resolver = model.ShapeResolver(shapes)
shape = resolver.get_shape_by_name('mfaDeviceListType')
- self.assertEqual(shape.member.type_name, 'structure')
- self.assertEqual(shape.member.name, 'MFADevice')
- self.assertEqual(list(shape.member.members), ['UserName'])
+ assert shape.member.type_name == 'structure'
+ assert shape.member.name == 'MFADevice'
+ assert list(shape.member.members) == ['UserName']
def test_shape_does_not_exist(self):
resolver = model.ShapeResolver({})
- with self.assertRaises(model.NoShapeFoundError):
+ with pytest.raises(model.NoShapeFoundError):
resolver.get_shape_by_name('NoExistShape')
def test_missing_type_key(self):
@@ -826,7 +815,7 @@ class TestShapeResolver(unittest.TestCase):
}
}
resolver = model.ShapeResolver(shapes)
- with self.assertRaises(model.InvalidShapeError):
+ with pytest.raises(model.InvalidShapeError):
resolver.get_shape_by_name('UnknownType')
def test_bad_shape_ref(self):
@@ -842,7 +831,7 @@ class TestShapeResolver(unittest.TestCase):
}
}
resolver = model.ShapeResolver(shapes)
- with self.assertRaises(model.InvalidShapeReferenceError):
+ with pytest.raises(model.InvalidShapeReferenceError):
struct = resolver.get_shape_by_name('Struct')
# Resolving the members will fail because
# the 'A' and 'B' members are not shape refs.
@@ -855,8 +844,7 @@ class TestShapeResolver(unittest.TestCase):
}
}
resolver = model.ShapeResolver(shapes)
- self.assertIn('StringType',
- repr(resolver.get_shape_by_name('StringType')))
+ assert 'StringType' in repr(resolver.get_shape_by_name('StringType'))
class TestBuilders(unittest.TestCase):
@@ -867,10 +855,10 @@ class TestBuilders(unittest.TestCase):
'A': {'type': 'string'},
'B': {'type': 'integer'},
}).build_model()
- self.assertIsInstance(shape, model.StructureShape)
- self.assertEqual(sorted(list(shape.members)), ['A', 'B'])
- self.assertEqual(shape.members['A'].type_name, 'string')
- self.assertEqual(shape.members['B'].type_name, 'integer')
+ assert isinstance(shape, model.StructureShape)
+ assert sorted(list(shape.members)) == ['A', 'B']
+ assert shape.members['A'].type_name == 'string'
+ assert shape.members['B'].type_name == 'integer'
def test_structure_shape_with_structure_type(self):
b = model.DenormalizedStructureBuilder()
@@ -882,10 +870,10 @@ class TestBuilders(unittest.TestCase):
}
},
}).build_model()
- self.assertIsInstance(shape, model.StructureShape)
- self.assertEqual(list(shape.members), ['A'])
- self.assertEqual(shape.members['A'].type_name, 'structure')
- self.assertEqual(list(shape.members['A'].members), ['A-1'])
+ assert isinstance(shape, model.StructureShape)
+ assert list(shape.members) == ['A']
+ assert shape.members['A'].type_name == 'structure'
+ assert list(shape.members['A'].members) == ['A-1']
def test_structure_shape_with_list(self):
b = model.DenormalizedStructureBuilder()
@@ -897,8 +885,8 @@ class TestBuilders(unittest.TestCase):
}
},
}).build_model()
- self.assertIsInstance(shape.members['A'], model.ListShape)
- self.assertEqual(shape.members['A'].member.type_name, 'string')
+ assert isinstance(shape.members['A'], model.ListShape)
+ assert shape.members['A'].member.type_name == 'string'
def test_structure_shape_with_map_type(self):
b = model.DenormalizedStructureBuilder()
@@ -909,10 +897,10 @@ class TestBuilders(unittest.TestCase):
'value': {'type': 'string'},
}
}).build_model()
- self.assertIsInstance(shape.members['A'], model.MapShape)
+ assert isinstance(shape.members['A'], model.MapShape)
map_shape = shape.members['A']
- self.assertEqual(map_shape.key.type_name, 'string')
- self.assertEqual(map_shape.value.type_name, 'string')
+ assert map_shape.key.type_name == 'string'
+ assert map_shape.value.type_name == 'string'
def test_nested_structure(self):
b = model.DenormalizedStructureBuilder()
@@ -931,8 +919,7 @@ class TestBuilders(unittest.TestCase):
}
}
}).build_model()
- self.assertEqual(
- shape.members['A'].members['B'].members['C'].type_name, 'string')
+ assert shape.members['A'].members['B'].members['C'].type_name == 'string'
def test_enum_values_on_string_used(self):
b = model.DenormalizedStructureBuilder()
@@ -943,11 +930,11 @@ class TestBuilders(unittest.TestCase):
'enum': enum_values,
},
}).build_model()
- self.assertIsInstance(shape, model.StructureShape)
+ assert isinstance(shape, model.StructureShape)
string_shape = shape.members['A']
- self.assertIsInstance(string_shape, model.StringShape)
- self.assertEqual(string_shape.metadata['enum'], enum_values)
- self.assertEqual(string_shape.enum, enum_values)
+ assert isinstance(string_shape, model.StringShape)
+ assert string_shape.metadata['enum'] == enum_values
+ assert string_shape.enum == enum_values
def test_documentation_on_shape_used(self):
b = model.DenormalizedStructureBuilder()
@@ -957,8 +944,7 @@ class TestBuilders(unittest.TestCase):
'documentation': 'MyDocs',
},
}).build_model()
- self.assertEqual(shape.members['A'].documentation,
- 'MyDocs')
+ assert shape.members['A'].documentation == 'MyDocs'
def test_min_max_used_in_metadata(self):
b = model.DenormalizedStructureBuilder()
@@ -971,8 +957,8 @@ class TestBuilders(unittest.TestCase):
},
}).build_model()
metadata = shape.members['A'].metadata
- self.assertEqual(metadata.get('min'), 2)
- self.assertEqual(metadata.get('max'), 3)
+ assert metadata.get('min') == 2
+ assert metadata.get('max') == 3
def test_use_shape_name_when_provided(self):
b = model.DenormalizedStructureBuilder()
@@ -982,11 +968,11 @@ class TestBuilders(unittest.TestCase):
'shape_name': 'MyStringShape',
},
}).build_model()
- self.assertEqual(shape.members['A'].name, 'MyStringShape')
+ assert shape.members['A'].name == 'MyStringShape'
def test_unknown_shape_type(self):
b = model.DenormalizedStructureBuilder()
- with self.assertRaises(model.InvalidShapeError):
+ with pytest.raises(model.InvalidShapeError):
b.with_members({
'A': {
'type': 'brand-new-shape-type',
@@ -1017,10 +1003,10 @@ class TestBuilders(unittest.TestCase):
)).build_model()
# Members should be in order
- self.assertEqual(['A', 'B'], list(shape.members.keys()))
+ assert list(shape.members.keys()) == ['A', 'B']
# Nested structure members should *also* stay ordered
- self.assertEqual(['C', 'D'], list(shape.members['B'].members.keys()))
+ assert list(shape.members['B'].members.keys()) == ['C', 'D']
if __name__ == '__main__':
diff --git a/tests/unit/test_monitoring.py b/tests/unit/test_monitoring.py
index 0ee6a873..5e1fac0c 100644
--- a/tests/unit/test_monitoring.py
+++ b/tests/unit/test_monitoring.py
@@ -15,6 +15,7 @@ import json
import re
import socket
import time
+import pytest
from botocore.awsrequest import AWSRequest
from botocore.compat import six
@@ -44,16 +45,13 @@ class TestMonitor(unittest.TestCase):
def test_register(self):
event_emitter = mock.Mock(HierarchicalEmitter)
self.handler.register(event_emitter)
- self.assertEqual(
- event_emitter.register_last.call_args_list,
- [
+ assert event_emitter.register_last.call_args_list == [
mock.call('before-parameter-build', self.handler.capture),
mock.call('request-created', self.handler.capture),
mock.call('response-received', self.handler.capture),
mock.call('after-call', self.handler.capture),
mock.call('after-call-error', self.handler.capture),
]
- )
def test_handle(self):
event = object()
@@ -116,23 +114,19 @@ class TestMonitorEventAdapter(unittest.TestCase):
})
def test_feed_before_parameter_build_returns_no_event(self):
- self.assertIsNone(
- self.adapter.feed('before-parameter-build', {
+ assert self.adapter.feed('before-parameter-build', {
'model': self.operation_model,
'context': self.context
- })
- )
+ }) is None
def test_feed_request_created_returns_no_event(self):
self.adapter.feed('before-parameter-build', {
'model': self.operation_model,
'context': self.context
})
- self.assertIsNone(
- self.adapter.feed('request-created', {
+ assert self.adapter.feed('request-created', {
'request': self.request,
- })
- )
+ }) is None
def test_feed_with_successful_response(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -149,9 +143,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context,
'exception': None
})
- self.assertEqual(
- attempt_event,
- APICallAttemptEvent(
+ assert attempt_event == APICallAttemptEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=2000,
@@ -161,7 +153,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
http_status_code=self.http_status_code,
response_headers=self.response_headers,
)
- )
self.mock_time.return_value = 4
call_event = self.adapter.feed('after-call', {
@@ -173,16 +164,13 @@ class TestMonitorEventAdapter(unittest.TestCase):
},
'context': self.context
})
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
latency=3000,
attempts=[attempt_event]
)
- )
def test_feed_with_retries(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -199,9 +187,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context,
'exception': None
})
- self.assertEqual(
- first_attempt_event,
- APICallAttemptEvent(
+ assert first_attempt_event == APICallAttemptEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=2000,
@@ -211,7 +197,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
http_status_code=500,
response_headers=self.response_headers,
)
- )
self.feed_request_created_event(current_time=5)
self.mock_time.return_value = 6
@@ -225,9 +210,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context,
'exception': None
})
- self.assertEqual(
- second_attempt_event,
- APICallAttemptEvent(
+ assert second_attempt_event == APICallAttemptEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=5000,
@@ -237,7 +220,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
http_status_code=200,
response_headers=self.response_headers,
)
- )
self.mock_time.return_value = 7
call_event = self.adapter.feed('after-call', {
@@ -249,16 +231,13 @@ class TestMonitorEventAdapter(unittest.TestCase):
},
'context': self.context
})
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
latency=6000,
attempts=[first_attempt_event, second_attempt_event]
)
- )
def test_feed_with_retries_exceeded(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -299,9 +278,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
},
'context': self.context
})
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
@@ -309,7 +286,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
attempts=[first_attempt_event, second_attempt_event],
retries_exceeded=True
)
- )
def test_feed_with_parsed_error(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -329,9 +305,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context,
'exception': None
})
- self.assertEqual(
- attempt_event,
- APICallAttemptEvent(
+ assert attempt_event == APICallAttemptEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=2000,
@@ -342,23 +316,19 @@ class TestMonitorEventAdapter(unittest.TestCase):
response_headers=self.response_headers,
parsed_error=parsed_error
)
- )
self.mock_time.return_value = 4
call_event = self.adapter.feed('after-call', {
'parsed': parsed_response,
'context': self.context
})
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
latency=3000,
attempts=[attempt_event]
)
- )
def test_feed_with_wire_exception(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -371,9 +341,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context,
'exception': wire_exception
})
- self.assertEqual(
- attempt_event,
- APICallAttemptEvent(
+ assert attempt_event == APICallAttemptEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=2000,
@@ -382,7 +350,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
request_headers=self.request_headers,
wire_exception=wire_exception,
)
- )
self.mock_time.return_value = 4
call_event = self.adapter.feed(
@@ -391,16 +358,13 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context
}
)
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
latency=3000,
attempts=[attempt_event]
)
- )
def test_feed_with_wire_exception_retries_exceeded(self):
self.feed_before_parameter_build_event(current_time=1)
@@ -421,9 +385,7 @@ class TestMonitorEventAdapter(unittest.TestCase):
'context': self.context
}
)
- self.assertEqual(
- call_event,
- APICallEvent(
+ assert call_event == APICallEvent(
service=self.service_id,
operation=self.wire_name,
timestamp=1000,
@@ -431,7 +393,6 @@ class TestMonitorEventAdapter(unittest.TestCase):
attempts=[attempt_event],
retries_exceeded=True
)
- )
class TestBaseMonitorEvent(unittest.TestCase):
@@ -439,37 +400,29 @@ class TestBaseMonitorEvent(unittest.TestCase):
event = BaseMonitorEvent(
service='MyService', operation='MyOperation', timestamp=1000
)
- self.assertEqual(event.service, 'MyService')
- self.assertEqual(event.operation, 'MyOperation')
- self.assertEqual(event.timestamp, 1000)
+ assert event.service == 'MyService'
+ assert event.operation == 'MyOperation'
+ assert event.timestamp == 1000
def test_eq(self):
- self.assertEqual(
- BaseMonitorEvent(
+ assert BaseMonitorEvent(
service='MyService', operation='MyOperation', timestamp=1000
- ),
- BaseMonitorEvent(
+ ) == BaseMonitorEvent(
service='MyService', operation='MyOperation', timestamp=1000
)
- )
def test_not_eq_different_classes(self):
- self.assertNotEqual(
- BaseMonitorEvent(
+ assert BaseMonitorEvent(
service='MyService', operation='MyOperation', timestamp=1000
- ), object()
- )
+ ) != object()
def test_not_eq_different_attrs(self):
- self.assertNotEqual(
- BaseMonitorEvent(
+ assert BaseMonitorEvent(
service='MyService', operation='MyOperation', timestamp=1000
- ),
- BaseMonitorEvent(
+ ) != BaseMonitorEvent(
service='DifferentService', operation='DifferentOperation',
timestamp=0
)
- )
class TestAPICallEvent(unittest.TestCase):
@@ -478,11 +431,11 @@ class TestAPICallEvent(unittest.TestCase):
service='MyService', operation='MyOperation', timestamp=1000,
latency=2000, attempts=[]
)
- self.assertEqual(event.service, 'MyService')
- self.assertEqual(event.operation, 'MyOperation')
- self.assertEqual(event.timestamp, 1000)
- self.assertEqual(event.latency, 2000)
- self.assertEqual(event.attempts, [])
+ assert event.service == 'MyService'
+ assert event.operation == 'MyOperation'
+ assert event.timestamp == 1000
+ assert event.latency == 2000
+ assert event.attempts == []
def test_new_api_call_attempt_event(self):
event = APICallEvent(
@@ -490,13 +443,10 @@ class TestAPICallEvent(unittest.TestCase):
latency=2000, attempts=[]
)
attempt_event = event.new_api_call_attempt(timestamp=2000)
- self.assertEqual(
- attempt_event,
- APICallAttemptEvent(
+ assert attempt_event == APICallAttemptEvent(
service='MyService', operation='MyOperation', timestamp=2000
)
- )
- self.assertEqual(event.attempts, [attempt_event])
+ assert event.attempts == [attempt_event]
class TestAPICallAttemptEvent(unittest.TestCase):
@@ -510,16 +460,16 @@ class TestAPICallAttemptEvent(unittest.TestCase):
response_headers={}, parsed_error=parsed_error,
wire_exception=wire_exception
)
- self.assertEqual(event.service, 'MyService')
- self.assertEqual(event.operation, 'MyOperation')
- self.assertEqual(event.timestamp, 1000)
- self.assertEqual(event.latency, 2000)
- self.assertEqual(event.url, url)
- self.assertEqual(event.http_status_code, 200)
- self.assertEqual(event.request_headers, {})
- self.assertEqual(event.response_headers, {})
- self.assertEqual(event.parsed_error, parsed_error)
- self.assertEqual(event.wire_exception, wire_exception)
+ assert event.service == 'MyService'
+ assert event.operation == 'MyOperation'
+ assert event.timestamp == 1000
+ assert event.latency == 2000
+ assert event.url == url
+ assert event.http_status_code == 200
+ assert event.request_headers == {}
+ assert event.response_headers == {}
+ assert event.parsed_error == parsed_error
+ assert event.wire_exception == wire_exception
class TestCSMSerializer(unittest.TestCase):
@@ -543,27 +493,26 @@ class TestCSMSerializer(unittest.TestCase):
def test_validates_csm_client_id(self):
max_client_id_len = 255
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
CSMSerializer('a' * (max_client_id_len + 1))
def test_serialize_produces_bytes(self):
event = APICallEvent(
service=self.service, operation=self.operation, timestamp=1000)
serialized_event = self.serializer.serialize(event)
- self.assertIsInstance(serialized_event, six.binary_type)
+ assert isinstance(serialized_event, six.binary_type)
def test_serialize_does_not_add_whitespace(self):
event = APICallEvent(
service=self.service, operation=self.operation, timestamp=1000)
serialized_event = self.serializer.serialize(event)
- self.assertIsNone(re.match(r'\s', serialized_event.decode('utf-8')))
+ assert re.match(r'\s', serialized_event.decode('utf-8')) is None
def test_serialize_api_call_event(self):
event = APICallEvent(
service=self.service, operation=self.operation, timestamp=1000)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict, {
+ assert serialized_event_dict == {
'Version': 1,
'Type': 'ApiCall',
'Service': self.service,
@@ -573,21 +522,20 @@ class TestCSMSerializer(unittest.TestCase):
'Timestamp': 1000,
'AttemptCount': 0,
}
- )
def test_serialize_api_call_event_with_latency(self):
event = APICallEvent(
service=self.service, operation=self.operation,
timestamp=1000, latency=2000)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['Latency'], self.latency)
+ assert serialized_event_dict['Latency'] == self.latency
def test_serialize_api_call_event_with_attempts(self):
event = APICallEvent(
service=self.service, operation=self.operation, timestamp=1000)
event.new_api_call_attempt(2000)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['AttemptCount'], 1)
+ assert serialized_event_dict['AttemptCount'] == 1
def test_serialize_api_call_event_region(self):
event = APICallEvent(
@@ -603,7 +551,7 @@ class TestCSMSerializer(unittest.TestCase):
self.request_headers['Authorization'] = auth_value
attempt.request_headers = self.request_headers
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['Region'], 'my-region-1')
+ assert serialized_event_dict['Region'] == 'my-region-1'
def test_serialize_api_call_event_user_agent(self):
event = APICallEvent(
@@ -611,7 +559,7 @@ class TestCSMSerializer(unittest.TestCase):
attempt = event.new_api_call_attempt(2000)
attempt.request_headers = {'User-Agent': self.user_agent}
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['UserAgent'], self.user_agent)
+ assert serialized_event_dict['UserAgent'] == self.user_agent
def test_serialize_api_call_event_http_status_code(self):
event = APICallEvent(
@@ -619,7 +567,7 @@ class TestCSMSerializer(unittest.TestCase):
attempt = event.new_api_call_attempt(2000)
attempt.http_status_code = 200
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['FinalHttpStatusCode'], 200)
+ assert serialized_event_dict['FinalHttpStatusCode'] == 200
def test_serialize_api_call_event_parsed_error(self):
event = APICallEvent(
@@ -630,12 +578,8 @@ class TestCSMSerializer(unittest.TestCase):
'Message': 'My error message'
}
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['FinalAwsException'], 'MyErrorCode')
- self.assertEqual(
- serialized_event_dict['FinalAwsExceptionMessage'],
- 'My error message'
- )
+ assert serialized_event_dict['FinalAwsException'] == 'MyErrorCode'
+ assert serialized_event_dict['FinalAwsExceptionMessage'] == 'My error message'
def test_serialize_api_call_event_wire_exception(self):
event = APICallEvent(
@@ -643,27 +587,22 @@ class TestCSMSerializer(unittest.TestCase):
attempt = event.new_api_call_attempt(2000)
attempt.wire_exception = Exception('Error on the wire')
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['FinalSdkException'], 'Exception')
- self.assertEqual(
- serialized_event_dict['FinalSdkExceptionMessage'],
- 'Error on the wire'
- )
+ assert serialized_event_dict['FinalSdkException'] == 'Exception'
+ assert serialized_event_dict['FinalSdkExceptionMessage'] == 'Error on the wire'
def test_serialize_api_call_event_with_retries_exceeded(self):
event = APICallEvent(
service=self.service, operation=self.operation, timestamp=1000,
retries_exceeded=True)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['MaxRetriesExceeded'], 1)
+ assert serialized_event_dict['MaxRetriesExceeded'] == 1
def test_serialize_api_call_attempt_event(self):
event = APICallAttemptEvent(
service=self.service, operation=self.operation,
timestamp=self.timestamp)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict, {
+ assert serialized_event_dict == {
'Version': 1,
'Type': 'ApiCallAttempt',
'Service': self.service,
@@ -671,14 +610,13 @@ class TestCSMSerializer(unittest.TestCase):
'ClientId': self.csm_client_id,
'Timestamp': self.timestamp,
}
- )
def test_serialize_api_call_attempt_event_with_latency(self):
event = APICallAttemptEvent(
service=self.service, operation=self.operation,
timestamp=self.timestamp, latency=self.latency)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['AttemptLatency'], self.latency)
+ assert serialized_event_dict['AttemptLatency'] == self.latency
def test_serialize_with_user_agent(self):
event = APICallAttemptEvent(
@@ -687,14 +625,14 @@ class TestCSMSerializer(unittest.TestCase):
request_headers={'User-Agent': self.user_agent}
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['UserAgent'], self.user_agent)
+ assert serialized_event_dict['UserAgent'] == self.user_agent
def test_serialize_with_url(self):
event = APICallAttemptEvent(
service=self.service, operation=self.operation,
timestamp=self.timestamp, url=self.url)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['Fqdn'], self.fqdn)
+ assert serialized_event_dict['Fqdn'] == self.fqdn
def test_serialize_with_s3_signing(self):
auth_value = 'AWS myaccesskey:somesignature'
@@ -703,7 +641,7 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, request_headers=self.request_headers)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['AccessKey'], 'myaccesskey')
+ assert serialized_event_dict['AccessKey'] == 'myaccesskey'
def test_serialize_with_sigv4_sigining(self):
auth_value = (
@@ -718,7 +656,7 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, request_headers=self.request_headers)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['AccessKey'], 'myaccesskey')
+ assert serialized_event_dict['AccessKey'] == 'myaccesskey'
def test_serialize_with_session_token(self):
self.request_headers['X-Amz-Security-Token'] = 'my-security-token'
@@ -726,8 +664,7 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, request_headers=self.request_headers)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['SessionToken'], 'my-security-token')
+ assert serialized_event_dict['SessionToken'] == 'my-security-token'
def test_serialize_with_path_parameters_in_url(self):
self.url = 'https://' + self.fqdn + '/resource'
@@ -735,7 +672,7 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, url=self.url)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['Fqdn'], self.fqdn)
+ assert serialized_event_dict['Fqdn'] == self.fqdn
def test_serialize_with_request_id_headers(self):
response_headers = {
@@ -747,9 +684,9 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, response_headers=response_headers)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['XAmznRequestId'], 'id1')
- self.assertEqual(serialized_event_dict['XAmzRequestId'], 'id2')
- self.assertEqual(serialized_event_dict['XAmzId2'], 'id3')
+ assert serialized_event_dict['XAmznRequestId'] == 'id1'
+ assert serialized_event_dict['XAmzRequestId'] == 'id2'
+ assert serialized_event_dict['XAmzId2'] == 'id3'
def test_serialize_filters_unwanted_response_headers(self):
response_headers = {'filter-out': 'do-not-include-this'}
@@ -757,8 +694,7 @@ class TestCSMSerializer(unittest.TestCase):
service=self.service, operation=self.operation,
timestamp=self.timestamp, response_headers=response_headers)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict, {
+ assert serialized_event_dict == {
'Version': 1,
'Type': 'ApiCallAttempt',
'Service': self.service,
@@ -766,14 +702,13 @@ class TestCSMSerializer(unittest.TestCase):
'ClientId': self.csm_client_id,
'Timestamp': self.timestamp,
}
- )
def test_serialize_with_status_code(self):
event = APICallAttemptEvent(
service=self.service, operation=self.operation,
timestamp=self.timestamp, http_status_code=200)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['HttpStatusCode'], 200)
+ assert serialized_event_dict['HttpStatusCode'] == 200
def test_serialize_with_service_error(self):
event = APICallAttemptEvent(
@@ -784,9 +719,8 @@ class TestCSMSerializer(unittest.TestCase):
}
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['AwsException'], 'MyErrorCode')
- self.assertEqual(
- serialized_event_dict['AwsExceptionMessage'], 'My error message')
+ assert serialized_event_dict['AwsException'] == 'MyErrorCode'
+ assert serialized_event_dict['AwsExceptionMessage'] == 'My error message'
def test_serialize_with_wire_exception(self):
event = APICallAttemptEvent(
@@ -795,9 +729,8 @@ class TestCSMSerializer(unittest.TestCase):
wire_exception=Exception('Error on the wire')
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(serialized_event_dict['SdkException'], 'Exception')
- self.assertEqual(
- serialized_event_dict['SdkExceptionMessage'], 'Error on the wire')
+ assert serialized_event_dict['SdkException'] == 'Exception'
+ assert serialized_event_dict['SdkExceptionMessage'] == 'Error on the wire'
def test_serialize_truncates_long_user_agent(self):
max_user_agent_length = 256
@@ -808,10 +741,7 @@ class TestCSMSerializer(unittest.TestCase):
request_headers={'User-Agent': user_agent}
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['UserAgent'],
- user_agent[:max_user_agent_length]
- )
+ assert serialized_event_dict['UserAgent'] == user_agent[:max_user_agent_length]
def test_serialize_truncates_long_service_error(self):
max_error_code_length = 128
@@ -826,14 +756,10 @@ class TestCSMSerializer(unittest.TestCase):
}
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['AwsException'],
- long_error_code[:max_error_code_length]
- )
- self.assertEqual(
- serialized_event_dict['AwsExceptionMessage'],
- long_error_message[:max_error_message_length]
- )
+ assert serialized_event_dict['AwsException'] == long_error_code[
+ :max_error_code_length]
+ assert serialized_event_dict['AwsExceptionMessage'] == long_error_message[
+ :max_error_message_length]
def test_serialize_truncates_long_wire_exception(self):
max_class_name_length = 128
@@ -848,14 +774,10 @@ class TestCSMSerializer(unittest.TestCase):
)
serialized_event_dict = self.get_serialized_event_dict(event)
- self.assertEqual(
- serialized_event_dict['SdkException'],
- long_class_name[:max_class_name_length]
- )
- self.assertEqual(
- serialized_event_dict['SdkExceptionMessage'],
- long_error_message[:max_error_message_length]
- )
+ assert serialized_event_dict['SdkException'] == long_class_name[
+ :max_class_name_length]
+ assert serialized_event_dict['SdkExceptionMessage'] == long_error_message[
+ :max_error_message_length]
class TestSocketPublisher(unittest.TestCase):
diff --git a/tests/unit/test_paginate.py b/tests/unit/test_paginate.py
index 18aa17fc..c0ce7e39 100644
--- a/tests/unit/test_paginate.py
+++ b/tests/unit/test_paginate.py
@@ -19,6 +19,7 @@ from botocore.paginate import TokenDecoder
from botocore.paginate import TokenEncoder
from botocore.exceptions import PaginationError
from botocore.compat import six
+import pytest
from tests import mock
@@ -34,14 +35,14 @@ class TestTokenDecoder(unittest.TestCase):
def test_decode(self):
token = 'eyJmb28iOiAiYmFyIn0='
expected = {'foo': 'bar'}
- self.assertEqual(self.decoder.decode(token), expected)
+ assert self.decoder.decode(token) == expected
def test_decode_with_bytes(self):
token = (
'eyJib3RvX2VuY29kZWRfa2V5cyI6IFtbImZvbyJdXSwgImZvbyI6ICJZbUZ5In0='
)
expected = {'foo': b'bar'}
- self.assertEqual(self.decoder.decode(token), expected)
+ assert self.decoder.decode(token) == expected
def test_decode_with_nested_bytes(self):
token = (
@@ -49,7 +50,7 @@ class TestTokenDecoder(unittest.TestCase):
'IFtbImZvbyIsICJiYXIiXV19'
)
expected = {'foo': {'bar': b'baz'}}
- self.assertEqual(self.decoder.decode(token), expected)
+ assert self.decoder.decode(token) == expected
def test_decode_with_listed_bytes(self):
token = (
@@ -57,7 +58,7 @@ class TestTokenDecoder(unittest.TestCase):
'OiB7ImJhciI6IFsiYmF6IiwgIlltbHUiXX19'
)
expected = {'foo': {'bar': ['baz', b'bin']}}
- self.assertEqual(self.decoder.decode(token), expected)
+ assert self.decoder.decode(token) == expected
def test_decode_with_multiple_bytes_values(self):
token = (
@@ -65,7 +66,7 @@ class TestTokenDecoder(unittest.TestCase):
'YmFyIl1dLCAiZm9vIjogeyJiaW4iOiAiWW1GdCIsICJiYXIiOiAiWW1GNiJ9fQ=='
)
expected = {'foo': {'bar': b'baz', 'bin': b'bam'}}
- self.assertEqual(self.decoder.decode(token), expected)
+ assert self.decoder.decode(token) == expected
class TestPaginatorModel(unittest.TestCase):
@@ -82,14 +83,12 @@ class TestPaginatorModel(unittest.TestCase):
def test_get_paginator(self):
paginator_config = self.paginator_model.get_paginator('ListFoos')
- self.assertEqual(
- paginator_config,
- {'output_token': 'NextToken', 'input_token': 'NextToken',
- 'result_key': 'Foo'}
- )
+ assert paginator_config == {
+ 'output_token': 'NextToken', 'input_token': 'NextToken',
+ 'result_key': 'Foo'}
def test_get_paginator_no_exists(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
paginator_config = self.paginator_model.get_paginator('ListBars')
@@ -102,19 +101,17 @@ class TestPagination(unittest.TestCase):
'input_token': 'NextToken',
'result_key': 'Foo',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_result_key_available(self):
- self.assertEqual(
- [rk.expression for rk in self.paginator.result_keys],
- ['Foo']
- )
+ assert [rk.expression for rk in self.paginator.result_keys] == ['Foo']
def test_no_next_token(self):
response = {'not_the_next_token': 'foobar'}
self.method.return_value = response
actual = list(self.paginator.paginate())
- self.assertEqual(actual, [{'not_the_next_token': 'foobar'}])
+ assert actual == [{'not_the_next_token': 'foobar'}]
def test_next_token_in_response(self):
responses = [{'NextToken': 'token1'},
@@ -122,12 +119,12 @@ class TestPagination(unittest.TestCase):
{'not_next_token': 'foo'}]
self.method.side_effect = responses
actual = list(self.paginator.paginate())
- self.assertEqual(actual, responses)
+ assert actual == responses
# The first call has no next token, the second and third calls should
# have 'token1' and 'token2' respectively.
- self.assertEqual(self.method.call_args_list,
- [mock.call(), mock.call(NextToken='token1'),
- mock.call(NextToken='token2')])
+ assert self.method.call_args_list == [
+ mock.call(), mock.call(NextToken='token1'),
+ mock.call(NextToken='token2')]
def test_next_token_is_string(self):
self.paginate_config = {
@@ -136,7 +133,8 @@ class TestPagination(unittest.TestCase):
"result_key": "Users",
"limit_key": "MaxKeys",
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{"Users": ["User1"], "Marker": "m1"},
{"Users": ["User2"], "Marker": "m2"},
@@ -146,7 +144,7 @@ class TestPagination(unittest.TestCase):
result = self.paginator.paginate(PaginationConfig={'MaxItems': 1})
result = result.build_full_result()
token = result.get('NextToken')
- self.assertIsInstance(token, six.string_types)
+ assert isinstance(token, six.string_types)
def test_any_passed_in_args_are_unmodified(self):
responses = [{'NextToken': 'token1'},
@@ -154,19 +152,18 @@ class TestPagination(unittest.TestCase):
{'not_next_token': 'foo'}]
self.method.side_effect = responses
actual = list(self.paginator.paginate(Foo='foo', Bar='bar'))
- self.assertEqual(actual, responses)
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(Foo='foo', Bar='bar'),
- mock.call(Foo='foo', Bar='bar', NextToken='token1'),
- mock.call(Foo='foo', Bar='bar', NextToken='token2')])
+ assert actual == responses
+ assert self.method.call_args_list == [
+ mock.call(Foo='foo', Bar='bar'),
+ mock.call(Foo='foo', Bar='bar', NextToken='token1'),
+ mock.call(Foo='foo', Bar='bar', NextToken='token2')]
def test_exception_raised_if_same_next_token(self):
responses = [{'NextToken': 'token1'},
{'NextToken': 'token2'},
{'NextToken': 'token2'}]
self.method.side_effect = responses
- with self.assertRaises(PaginationError):
+ with pytest.raises(PaginationError):
list(self.paginator.paginate())
def test_next_token_with_or_expression(self):
@@ -175,7 +172,8 @@ class TestPagination(unittest.TestCase):
'input_token': 'NextToken',
'result_key': 'Foo',
}
- self.paginator = Paginator(self.method, self.pagination_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.pagination_config, self.model)
# Verify that despite varying between NextToken and NextToken2
# we still can extract the right next tokens.
responses = [
@@ -189,12 +187,11 @@ class TestPagination(unittest.TestCase):
]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(NextToken='token1'),
- mock.call(NextToken='token2'),
- mock.call(NextToken='token3')])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(NextToken='token1'),
+ mock.call(NextToken='token2'),
+ mock.call(NextToken='token3')]
def test_more_tokens(self):
# Some pagination configs have a 'more_token' key that
@@ -205,7 +202,8 @@ class TestPagination(unittest.TestCase):
'input_token': 'NextToken',
'result_key': 'Foo',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{'Foo': [1], 'IsTruncated': True, 'NextToken': 'token1'},
{'Foo': [2], 'IsTruncated': True, 'NextToken': 'token2'},
@@ -214,11 +212,10 @@ class TestPagination(unittest.TestCase):
]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(NextToken='token1'),
- mock.call(NextToken='token2')])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(NextToken='token1'),
+ mock.call(NextToken='token2')]
def test_more_tokens_is_path_expression(self):
self.paginate_config = {
@@ -227,17 +224,17 @@ class TestPagination(unittest.TestCase):
'input_token': 'NextToken',
'result_key': 'Bar',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{'Foo': {'IsTruncated': True}, 'NextToken': 'token1'},
{'Foo': {'IsTruncated': False}, 'NextToken': 'token2'},
]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(NextToken='token1')])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(NextToken='token1')]
def test_page_size(self):
self.paginate_config = {
@@ -246,7 +243,8 @@ class TestPagination(unittest.TestCase):
"result_key": "Users",
"limit_key": "MaxKeys",
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{"Users": ["User1"], "Marker": "m1"},
{"Users": ["User2"], "Marker": "m2"},
@@ -256,12 +254,10 @@ class TestPagination(unittest.TestCase):
users = []
for page in self.paginator.paginate(PaginationConfig={'PageSize': 1}):
users += page['Users']
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(MaxKeys=1),
- mock.call(Marker='m1', MaxKeys=1),
- mock.call(Marker='m2', MaxKeys=1)]
- )
+ assert self.method.call_args_list == [
+ mock.call(MaxKeys=1),
+ mock.call(Marker='m1', MaxKeys=1),
+ mock.call(Marker='m2', MaxKeys=1)]
def test_with_empty_markers(self):
responses = [
@@ -274,11 +270,9 @@ class TestPagination(unittest.TestCase):
for page in self.paginator.paginate():
users += page['Users']
# We want to stop paginating if the next token is empty.
- self.assertEqual(
- self.method.call_args_list,
- [mock.call()]
- )
- self.assertEqual(users, ['User1'])
+ assert self.method.call_args_list == [
+ mock.call()]
+ assert users == ['User1']
def test_build_full_result_with_single_key(self):
self.paginate_config = {
@@ -287,7 +281,8 @@ class TestPagination(unittest.TestCase):
"result_key": "Users",
"limit_key": "MaxKeys",
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{"Users": ["User1"], "Marker": "m1"},
{"Users": ["User2"], "Marker": "m2"},
@@ -296,7 +291,7 @@ class TestPagination(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete, {'Users': ['User1', 'User2', 'User3']})
+ assert complete == {'Users': ['User1', 'User2', 'User3']}
def test_build_multiple_results(self):
self.paginate_config = {
@@ -305,7 +300,8 @@ class TestPagination(unittest.TestCase):
"result_key": "Users",
"limit_key": "MaxKeys",
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
max_items = 3
page_size = 2
@@ -339,7 +335,7 @@ class TestPagination(unittest.TestCase):
'Marker': 'm2',
'boto_truncate_amount': 2,
})
- self.assertEqual(expected_token, result['NextToken'])
+ assert expected_token == result['NextToken']
class TestPaginatorPageSize(unittest.TestCase):
@@ -352,7 +348,8 @@ class TestPaginatorPageSize(unittest.TestCase):
"result_key": ["Users", "Groups"],
'limit_key': 'MaxKeys',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
self.endpoint = mock.Mock()
def test_no_page_size(self):
@@ -360,7 +357,7 @@ class TestPaginatorPageSize(unittest.TestCase):
ref_kwargs = {'arg1': 'foo', 'arg2': 'bar'}
pages = self.paginator.paginate(**kwargs)
pages._inject_starting_params(kwargs)
- self.assertEqual(kwargs, ref_kwargs)
+ assert kwargs == ref_kwargs
def test_page_size(self):
kwargs = {'arg1': 'foo', 'arg2': 'bar',
@@ -371,7 +368,7 @@ class TestPaginatorPageSize(unittest.TestCase):
ref_kwargs = {'arg1': 'foo', 'arg2': 'bar', 'MaxKeys': 5}
pages = self.paginator.paginate(**kwargs)
pages._inject_starting_params(extracted_kwargs)
- self.assertEqual(extracted_kwargs, ref_kwargs)
+ assert extracted_kwargs == ref_kwargs
def test_page_size_incorrectly_provided(self):
kwargs = {'arg1': 'foo', 'arg2': 'bar',
@@ -379,7 +376,7 @@ class TestPaginatorPageSize(unittest.TestCase):
del self.paginate_config['limit_key']
paginator = Paginator(self.method, self.paginate_config, self.model)
- with self.assertRaises(PaginationError):
+ with pytest.raises(PaginationError):
paginator.paginate(**kwargs)
@@ -394,7 +391,8 @@ class TestPaginatorWithPathExpressions(unittest.TestCase):
'input_token': 'next_marker',
'result_key': 'Contents',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_s3_list_objects(self):
responses = [
@@ -403,11 +401,10 @@ class TestPaginatorWithPathExpressions(unittest.TestCase):
{'not_next_token': 'foo'}]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(next_marker='token1'),
- mock.call(next_marker='token2')])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(next_marker='token1'),
+ mock.call(next_marker='token2')]
def test_s3_list_object_complex(self):
responses = [
@@ -417,11 +414,10 @@ class TestPaginatorWithPathExpressions(unittest.TestCase):
{'not_next_token': 'foo'}]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(next_marker='token1'),
- mock.call(next_marker='Last')])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(next_marker='token1'),
+ mock.call(next_marker='Last')]
class TestBinaryTokens(unittest.TestCase):
@@ -433,7 +429,8 @@ class TestBinaryTokens(unittest.TestCase):
"input_token": "Marker",
"result_key": "Users"
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_build_full_result_with_bytes(self):
responses = [
@@ -451,7 +448,7 @@ class TestBinaryTokens(unittest.TestCase):
"Users": ["User1", "User2", "User3"],
"NextToken": expected_token
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
def test_build_full_result_with_nested_bytes(self):
responses = [
@@ -469,7 +466,7 @@ class TestBinaryTokens(unittest.TestCase):
"Users": ["User1", "User2", "User3"],
"NextToken": expected_token
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
def test_build_full_result_with_listed_bytes(self):
responses = [
@@ -487,7 +484,7 @@ class TestBinaryTokens(unittest.TestCase):
"Users": ["User1", "User2", "User3"],
"NextToken": expected_token
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
def test_build_full_result_with_multiple_bytes_values(self):
responses = [
@@ -514,7 +511,7 @@ class TestBinaryTokens(unittest.TestCase):
"Users": ["User1", "User2", "User3"],
"NextToken": expected_token
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
def test_resume_with_bytes(self):
responses = [
@@ -531,7 +528,7 @@ class TestBinaryTokens(unittest.TestCase):
expected_response = {
"Users": ["User4", "User5"]
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
self.method.assert_any_call(Marker=b'\xff')
def test_resume_with_nested_bytes(self):
@@ -549,7 +546,7 @@ class TestBinaryTokens(unittest.TestCase):
expected_response = {
"Users": ["User4", "User5"]
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
self.method.assert_any_call(Marker={'key': b'\xff'})
def test_resume_with_listed_bytes(self):
@@ -567,7 +564,7 @@ class TestBinaryTokens(unittest.TestCase):
expected_response = {
"Users": ["User4", "User5"]
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
self.method.assert_any_call(Marker={'key': ['foo', b'\xff']})
def test_resume_with_multiple_bytes_values(self):
@@ -591,7 +588,7 @@ class TestBinaryTokens(unittest.TestCase):
expected_response = {
"Users": ["User4", "User5"]
}
- self.assertEqual(complete, expected_response)
+ assert complete == expected_response
self.method.assert_any_call(Marker={'key': b'\xfe', 'key2': b'\xee'})
@@ -606,7 +603,8 @@ class TestMultipleTokens(unittest.TestCase):
"input_token": ["key_marker", "upload_id_marker"],
"result_key": 'Foo',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_s3_list_multipart_uploads(self):
responses = [
@@ -620,13 +618,12 @@ class TestMultipleTokens(unittest.TestCase):
]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(key_marker='key1', upload_id_marker='up1'),
- mock.call(key_marker='key2', upload_id_marker='up2'),
- mock.call(key_marker='key3', upload_id_marker='up3'),
- ])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(key_marker='key1', upload_id_marker='up1'),
+ mock.call(key_marker='key2', upload_id_marker='up2'),
+ mock.call(key_marker='key3', upload_id_marker='up3'),
+ ]
class TestOptionalTokens(unittest.TestCase):
@@ -638,6 +635,7 @@ class TestOptionalTokens(unittest.TestCase):
request params from a previous page, the API will skip over a record.
"""
+
def setUp(self):
self.method = mock.Mock()
self.model = mock.Mock()
@@ -651,7 +649,8 @@ class TestOptionalTokens(unittest.TestCase):
"StartRecordIdentifier"],
"result_key": 'Foo',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_clean_token(self):
responses = [
@@ -669,13 +668,12 @@ class TestOptionalTokens(unittest.TestCase):
]
self.method.side_effect = responses
list(self.paginator.paginate())
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(),
- mock.call(StartRecordName='aaa.example.com', StartRecordType='A',
- StartRecordIdentifier='id'),
- mock.call(StartRecordName='bbb.example.com', StartRecordType='A')
- ])
+ assert self.method.call_args_list == [
+ mock.call(),
+ mock.call(StartRecordName='aaa.example.com', StartRecordType='A',
+ StartRecordIdentifier='id'),
+ mock.call(StartRecordName='bbb.example.com', StartRecordType='A')
+ ]
class TestKeyIterators(unittest.TestCase):
@@ -688,7 +686,8 @@ class TestKeyIterators(unittest.TestCase):
"input_token": "Marker",
"result_key": "Users"
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_result_key_iters(self):
responses = [
@@ -699,9 +698,8 @@ class TestKeyIterators(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
iterators = pages.result_key_iters()
- self.assertEqual(len(iterators), 1)
- self.assertEqual(list(iterators[0]),
- ["User1", "User2", "User3"])
+ assert len(iterators) == 1
+ assert list(iterators[0]) == ["User1", "User2", "User3"]
def test_build_full_result_with_single_key(self):
responses = [
@@ -712,7 +710,7 @@ class TestKeyIterators(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete, {'Users': ['User1', 'User2', 'User3']})
+ assert complete == {'Users': ['User1', 'User2', 'User3']}
def test_max_items_can_be_specified(self):
paginator = Paginator(self.method, self.paginate_config, self.model)
@@ -723,10 +721,9 @@ class TestKeyIterators(unittest.TestCase):
]
self.method.side_effect = responses
expected_token = encode_token({"Marker": "m1"})
- self.assertEqual(
- paginator.paginate(
- PaginationConfig={'MaxItems': 1}).build_full_result(),
- {'Users': ['User1'], 'NextToken': expected_token})
+ assert paginator.paginate(
+ PaginationConfig={'MaxItems': 1}).build_full_result() == {
+ 'Users': ['User1'], 'NextToken': expected_token}
def test_max_items_as_strings(self):
# Some services (route53) model MaxItems as a string type.
@@ -739,11 +736,10 @@ class TestKeyIterators(unittest.TestCase):
]
self.method.side_effect = responses
expected_token = encode_token({"Marker": "m1"})
- self.assertEqual(
- # Note MaxItems is a string here.
- paginator.paginate(
- PaginationConfig={'MaxItems': '1'}).build_full_result(),
- {'Users': ['User1'], 'NextToken': expected_token})
+ # Note MaxItems is a string here.
+ assert paginator.paginate(
+ PaginationConfig={'MaxItems': '1'}).build_full_result() == {
+ 'Users': ['User1'], 'NextToken': expected_token}
def test_next_token_on_page_boundary(self):
paginator = Paginator(self.method, self.paginate_config, self.model)
@@ -754,10 +750,9 @@ class TestKeyIterators(unittest.TestCase):
]
self.method.side_effect = responses
expected_token = encode_token({"Marker": "m2"})
- self.assertEqual(
- paginator.paginate(
- PaginationConfig={'MaxItems': 2}).build_full_result(),
- {'Users': ['User1', 'User2'], 'NextToken': expected_token})
+ assert paginator.paginate(
+ PaginationConfig={'MaxItems': 2}).build_full_result() == {
+ 'Users': ['User1', 'User2'], 'NextToken': expected_token}
def test_max_items_can_be_specified_truncates_response(self):
# We're saying we only want 4 items, but notice that the second
@@ -772,11 +767,10 @@ class TestKeyIterators(unittest.TestCase):
self.method.side_effect = responses
expected_token = encode_token(
{"Marker": "m1", "boto_truncate_amount": 1})
- self.assertEqual(
- paginator.paginate(
- PaginationConfig={'MaxItems': 4}).build_full_result(),
- {'Users': ['User1', 'User2', 'User3', 'User4'],
- 'NextToken': expected_token})
+ assert paginator.paginate(
+ PaginationConfig={'MaxItems': 4}).build_full_result() == {
+ 'Users': ['User1', 'User2', 'User3', 'User4'],
+ 'NextToken': expected_token}
def test_resume_next_marker_mid_page(self):
# This is a simulation of picking up from the response
@@ -792,14 +786,12 @@ class TestKeyIterators(unittest.TestCase):
starting_token = encode_token(
{"Marker": "m1", "boto_truncate_amount": 1})
pagination_config = {'StartingToken': starting_token}
- self.assertEqual(
- paginator.paginate(
- PaginationConfig=pagination_config).build_full_result(),
- {'Users': ['User5', 'User6', 'User7']})
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(Marker='m1'),
- mock.call(Marker='m2')])
+ assert paginator.paginate(
+ PaginationConfig=pagination_config).build_full_result() == {
+ 'Users': ['User5', 'User6', 'User7']}
+ assert self.method.call_args_list == [
+ mock.call(Marker='m1'),
+ mock.call(Marker='m2')]
def test_max_items_exceeds_actual_amount(self):
# Because MaxItems=10 > number of users (3), we should just return
@@ -811,10 +803,9 @@ class TestKeyIterators(unittest.TestCase):
{"Users": ["User3"]},
]
self.method.side_effect = responses
- self.assertEqual(
- paginator.paginate(
- PaginationConfig={'MaxItems': 10}).build_full_result(),
- {'Users': ['User1', 'User2', 'User3']})
+ assert paginator.paginate(
+ PaginationConfig={'MaxItems': 10}).build_full_result() == {
+ 'Users': ['User1', 'User2', 'User3']}
def test_bad_input_tokens(self):
responses = [
@@ -823,7 +814,7 @@ class TestKeyIterators(unittest.TestCase):
{"Users": ["User3"]},
]
self.method.side_effect = responses
- with six.assertRaisesRegex(self, ValueError, 'Bad starting token'):
+ with pytest.raises(ValueError, match='Bad starting token'):
pagination_config = {'StartingToken': 'does___not___work'}
self.paginator.paginate(
PaginationConfig=pagination_config).build_full_result()
@@ -839,7 +830,8 @@ class TestMultipleResultKeys(unittest.TestCase):
"input_token": "Marker",
"result_key": ["Users", "Groups"],
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_build_full_result_with_multiple_result_keys(self):
responses = [
@@ -850,9 +842,9 @@ class TestMultipleResultKeys(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete,
- {"Users": ['User1', 'User2', 'User3'],
- "Groups": ['Group1', 'Group2', 'Group3']})
+ assert complete == {
+ "Users": ['User1', 'User2', 'User3'],
+ "Groups": ['Group1', 'Group2', 'Group3']}
def test_build_full_result_with_different_length_result_keys(self):
responses = [
@@ -864,9 +856,9 @@ class TestMultipleResultKeys(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete,
- {"Users": ['User1'],
- "Groups": ['Group1', 'Group2', 'Group3']})
+ assert complete == {
+ "Users": ['User1'],
+ "Groups": ['Group1', 'Group2', 'Group3']}
def test_build_full_result_with_zero_length_result_key(self):
responses = [
@@ -880,9 +872,9 @@ class TestMultipleResultKeys(unittest.TestCase):
self.method.side_effect = responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete,
- {"Users": [],
- "Groups": ['Group1', 'Group2', 'Group3']})
+ assert complete == {
+ "Users": [],
+ "Groups": ['Group1', 'Group2', 'Group3']}
def test_build_result_with_secondary_keys(self):
responses = [
@@ -898,9 +890,9 @@ class TestMultipleResultKeys(unittest.TestCase):
complete = pages.build_full_result()
expected_token = encode_token(
{"Marker": None, "boto_truncate_amount": 1})
- self.assertEqual(complete,
- {"Users": ["User1"], "Groups": ["Group1", "Group2"],
- "NextToken": expected_token})
+ assert complete == {
+ "Users": ["User1"], "Groups": ["Group1", "Group2"],
+ "NextToken": expected_token}
def test_resume_with_secondary_keys(self):
# This is simulating a continuation of the previous test,
@@ -924,9 +916,9 @@ class TestMultipleResultKeys(unittest.TestCase):
# Note that the secondary keys ("Groups") are all truncated because
# they were in the original (first) response.
expected_token = encode_token({"Marker": "m1"})
- self.assertEqual(complete,
- {"Users": ["User2"], "Groups": [],
- "NextToken": expected_token})
+ assert complete == {
+ "Users": ["User2"], "Groups": [],
+ "NextToken": expected_token}
def test_resume_with_secondary_result_as_string(self):
self.method.return_value = {"Users": ["User1", "User2"], "Groups": "a"}
@@ -937,7 +929,7 @@ class TestMultipleResultKeys(unittest.TestCase):
complete = pages.build_full_result()
# Note that the secondary keys ("Groups") become an empty string because
# they were in the original (first) response.
- self.assertEqual(complete, {"Users": ["User2"], "Groups": ""})
+ assert complete == {"Users": ["User2"], "Groups": ""}
def test_resume_with_secondary_result_as_integer(self):
self.method.return_value = {"Users": ["User1", "User2"], "Groups": 123}
@@ -948,7 +940,7 @@ class TestMultipleResultKeys(unittest.TestCase):
complete = pages.build_full_result()
# Note that the secondary keys ("Groups") become zero because
# they were in the original (first) response.
- self.assertEqual(complete, {"Users": ["User2"], "Groups": 0})
+ assert complete == {"Users": ["User2"], "Groups": 0}
class TestMultipleInputKeys(unittest.TestCase):
@@ -962,7 +954,8 @@ class TestMultipleInputKeys(unittest.TestCase):
"input_token": ["InMarker1", "InMarker2"],
"result_key": ["Users", "Groups"],
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_build_full_result_with_multiple_input_keys(self):
responses = [
@@ -978,10 +971,10 @@ class TestMultipleInputKeys(unittest.TestCase):
complete = pages.build_full_result()
expected_token = encode_token(
{"InMarker1": "m1", "InMarker2": "m2", "boto_truncate_amount": 1})
- self.assertEqual(complete,
- {"Users": ['User1', 'User2', 'User3'],
- "Groups": ['Group1', 'Group2'],
- "NextToken": expected_token})
+ assert complete == {
+ "Users": ['User1', 'User2', 'User3'],
+ "Groups": ['Group1', 'Group2'],
+ "NextToken": expected_token}
def test_resume_with_multiple_input_keys(self):
responses = [
@@ -998,13 +991,12 @@ class TestMultipleInputKeys(unittest.TestCase):
complete = pages.build_full_result()
expected_token = encode_token(
{"InMarker1": "m3", "InMarker2": "m4"})
- self.assertEqual(complete,
- {"Users": ['User4'],
- "Groups": [],
- "NextToken": expected_token})
- self.assertEqual(
- self.method.call_args_list,
- [mock.call(InMarker1='m1', InMarker2='m2')])
+ assert complete == {
+ "Users": ['User4'],
+ "Groups": [],
+ "NextToken": expected_token}
+ assert self.method.call_args_list == [
+ mock.call(InMarker1='m1', InMarker2='m2')]
def test_resume_encounters_an_empty_payload(self):
response = {"not_a_result_key": "it happens in some service"}
@@ -1014,20 +1006,16 @@ class TestMultipleInputKeys(unittest.TestCase):
complete = self.paginator \
.paginate(PaginationConfig={'StartingToken': starting_token}) \
.build_full_result()
- self.assertEqual(complete, {})
+ assert complete == {}
def test_result_key_exposed_on_paginator(self):
- self.assertEqual(
- [rk.expression for rk in self.paginator.result_keys],
- ['Users', 'Groups']
- )
+ assert [rk.expression for rk in self.paginator.result_keys] == [
+ 'Users', 'Groups']
def test_result_key_exposed_on_page_iterator(self):
pages = self.paginator.paginate(MaxItems=3)
- self.assertEqual(
- [rk.expression for rk in pages.result_keys],
- ['Users', 'Groups']
- )
+ assert [rk.expression for rk in pages.result_keys] == [
+ 'Users', 'Groups']
class TestExpressionKeyIterators(unittest.TestCase):
@@ -1041,7 +1029,8 @@ class TestExpressionKeyIterators(unittest.TestCase):
"limit_key": "MaxRecords",
"result_key": "EngineDefaults.Parameters"
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
self.responses = [
{"EngineDefaults": {"Parameters": ["One", "Two"]},
"Marker": "m1"},
@@ -1054,19 +1043,19 @@ class TestExpressionKeyIterators(unittest.TestCase):
self.method.side_effect = self.responses
pages = self.paginator.paginate()
iterators = pages.result_key_iters()
- self.assertEqual(len(iterators), 1)
- self.assertEqual(list(iterators[0]),
- ['One', 'Two', 'Three', 'Four', 'Five'])
+ assert len(iterators) == 1
+ assert list(iterators[0]) == [
+ 'One', 'Two', 'Three', 'Four', 'Five']
def test_build_full_result_with_single_key(self):
self.method.side_effect = self.responses
pages = self.paginator.paginate()
complete = pages.build_full_result()
- self.assertEqual(complete, {
+ assert complete == {
'EngineDefaults': {
'Parameters': ['One', 'Two', 'Three', 'Four', 'Five']
},
- })
+ }
class TestIncludeResultKeys(unittest.TestCase):
@@ -1078,7 +1067,8 @@ class TestIncludeResultKeys(unittest.TestCase):
'input_token': 'Marker',
'result_key': ['ResultKey', 'Count', 'Log'],
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_different_kinds_of_result_key(self):
self.method.side_effect = [
@@ -1093,7 +1083,7 @@ class TestIncludeResultKeys(unittest.TestCase):
'Count': 6,
'Log': 'xyz',
}
- self.assertEqual(pages.build_full_result(), expected)
+ assert pages.build_full_result() == expected
def test_result_key_is_missing(self):
self.method.side_effect = [
@@ -1102,7 +1092,7 @@ class TestIncludeResultKeys(unittest.TestCase):
]
pages = self.paginator.paginate()
expected = {}
- self.assertEqual(pages.build_full_result(), expected)
+ assert pages.build_full_result() == expected
class TestIncludeNonResultKeys(unittest.TestCase):
@@ -1117,7 +1107,8 @@ class TestIncludeNonResultKeys(unittest.TestCase):
'result_key': 'ResultKey',
'non_aggregate_keys': ['NotResultKey'],
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_include_non_aggregate_keys(self):
self.method.side_effect = [
@@ -1127,16 +1118,17 @@ class TestIncludeNonResultKeys(unittest.TestCase):
]
pages = self.paginator.paginate()
actual = pages.build_full_result()
- self.assertEqual(pages.non_aggregate_part, {'NotResultKey': 'a'})
+ assert pages.non_aggregate_part == {'NotResultKey': 'a'}
expected = {
'ResultKey': ['foo', 'bar', 'baz'],
'NotResultKey': 'a',
}
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_include_with_multiple_result_keys(self):
self.paginate_config['result_key'] = ['ResultKey1', 'ResultKey2']
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
self.method.side_effect = [
{'ResultKey1': ['a', 'b'], 'ResultKey2': ['u', 'v'],
'NotResultKey': 'a', 'NextToken': 'token1'},
@@ -1152,14 +1144,15 @@ class TestIncludeNonResultKeys(unittest.TestCase):
'ResultKey2': ['u', 'v', 'w', 'x', 'y', 'z'],
'NotResultKey': 'a',
}
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_include_with_nested_result_keys(self):
self.paginate_config['result_key'] = 'Result.Key'
self.paginate_config['non_aggregate_keys'] = [
'Outer', 'Result.Inner',
]
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
self.method.side_effect = [
# The non result keys show a hypothetical
# example. This doesn't actually happen,
@@ -1175,13 +1168,13 @@ class TestIncludeNonResultKeys(unittest.TestCase):
]
pages = self.paginator.paginate()
actual = pages.build_full_result()
- self.assertEqual(pages.non_aggregate_part,
- {'Outer': 'v2', 'Result': {'Inner': 'v1'}})
+ assert pages.non_aggregate_part == {
+ 'Outer': 'v2', 'Result': {'Inner': 'v1'}}
expected = {
'Result': {'Key': ['foo', 'bar', 'baz', 'qux'], 'Inner': 'v1'},
'Outer': 'v2',
}
- self.assertEqual(actual, expected)
+ assert actual == expected
class TestSearchOverResults(unittest.TestCase):
@@ -1194,7 +1187,8 @@ class TestSearchOverResults(unittest.TestCase):
'input_token': 'NextToken',
'result_key': 'Foo',
}
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
responses = [
{'Foo': [{'a': 1}, {'b': 2}],
'IsTruncated': True, 'NextToken': '1'},
@@ -1206,19 +1200,19 @@ class TestSearchOverResults(unittest.TestCase):
def test_yields_non_list_values(self):
result = list(self.paginator.paginate().search('Foo[0].a'))
- self.assertEqual([1, 3, 5], result)
+ assert result == [1, 3, 5]
def test_yields_individual_list_values(self):
result = list(self.paginator.paginate().search('Foo[].*[]'))
- self.assertEqual([1, 2, 3, 4, 5], result)
+ assert result == [1, 2, 3, 4, 5]
def test_empty_when_no_match(self):
result = list(self.paginator.paginate().search('Foo[].qux'))
- self.assertEqual([], result)
+ assert result == []
def test_no_yield_when_no_match_on_page(self):
result = list(self.paginator.paginate().search('Foo[].b'))
- self.assertEqual([2, 4], result)
+ assert result == [2, 4]
class TestDeprecatedStartingToken(unittest.TestCase):
@@ -1247,7 +1241,7 @@ class TestDeprecatedStartingToken(unittest.TestCase):
try:
actual = paginator.paginate(
PaginationConfig=pagination_config).build_full_result()
- self.assertEqual(actual, expected)
+ assert actual == expected
except ValueError:
self.fail("Deprecated paginator failed.")
@@ -1343,10 +1337,10 @@ class TestDeprecatedStartingToken(unittest.TestCase):
expected = {'Users': ['User1', 'User2', 'User3']}
paginator = self.create_paginator()
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
actual = paginator.paginate(
PaginationConfig=pagination_config).build_full_result()
- self.assertEqual(actual, expected)
+ assert actual == expected
class TestStringPageSize(unittest.TestCase):
@@ -1405,7 +1399,8 @@ class TestStringPageSize(unittest.TestCase):
self.model = self.service.operation_model('ListStuff')
self.method = mock.Mock()
self.method.side_effect = [{}]
- self.paginator = Paginator(self.method, self.paginate_config, self.model)
+ self.paginator = Paginator(
+ self.method, self.paginate_config, self.model)
def test_int_page_size(self):
res = list(self.paginator.paginate(PaginationConfig={'PageSize': 1}))
diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py
index ad813641..9b2116ab 100644
--- a/tests/unit/test_parsers.py
+++ b/tests/unit/test_parsers.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest, RawResponse
import datetime
+import pytest
from dateutil.tz import tzutc
@@ -84,11 +85,11 @@ class TestResponseMetadataParsed(unittest.TestCase):
{'body': response,
'headers': {},
'status_code': 200}, output_shape)
- self.assertEqual(
- parsed, {'Str': 'myname',
- 'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': {}}})
+ assert parsed == {'Str': 'myname',
+ 'ResponseMetadata':
+ {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {}}}
def test_metadata_always_exists_for_query(self):
# ResponseMetadata is used for more than just the request id. It
@@ -132,7 +133,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
'HTTPHeaders': {}
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_response_metadata_parsed_for_ec2(self):
parser = parsers.EC2QueryParser()
@@ -158,11 +159,10 @@ class TestResponseMetadataParsed(unittest.TestCase):
'status_code': 200}, output_shape)
# Note that the response metadata is normalized to match the query
# protocol, even though this is not how it appears in the output.
- self.assertEqual(
- parsed, {'Str': 'myname',
- 'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': {}}})
+ assert parsed == {'Str': 'myname',
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {}}}
def test_metadata_always_exists_for_ec2(self):
# ResponseMetadata is used for more than just the request id. It
@@ -195,8 +195,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
'HTTPHeaders': {}
}
}
- self.assertEqual(
- parsed, expected)
+ assert parsed == expected
def test_response_metadata_on_json_request(self):
parser = parsers.JSONParser()
@@ -218,11 +217,10 @@ class TestResponseMetadataParsed(unittest.TestCase):
'status_code': 200}, output_shape)
# Note that the response metadata is normalized to match the query
# protocol, even though this is not how it appears in the output.
- self.assertEqual(
- parsed, {'Str': 'mystring',
- 'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {'Str': 'mystring',
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
def test_response_no_initial_event_stream(self):
parser = parsers.JSONParser()
@@ -242,7 +240,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
}
})
)
- with self.assertRaises(parsers.ResponseParserError):
+ with pytest.raises(parsers.ResponseParserError):
response_dict = {
'status_code': 200,
'headers': {},
@@ -282,7 +280,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
'HTTPHeaders': headers
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_response_metadata_on_rest_json_response(self):
parser = parsers.RestJSONParser()
@@ -304,11 +302,10 @@ class TestResponseMetadataParsed(unittest.TestCase):
'status_code': 200}, output_shape)
# Note that the response metadata is normalized to match the query
# protocol, even though this is not how it appears in the output.
- self.assertEqual(
- parsed, {'Str': 'mystring',
- 'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {'Str': 'mystring',
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
def test_metadata_always_exists_on_rest_json_response(self):
# ResponseMetadata is used for more than just the request id. It
@@ -339,7 +336,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
'HTTPHeaders': headers
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_response_metadata_from_s3_response(self):
# Even though s3 is a rest-xml service, it's response metadata
@@ -353,12 +350,11 @@ class TestResponseMetadataParsed(unittest.TestCase):
parser = parsers.RestXMLParser()
parsed = parser.parse(
{'body': '', 'headers': headers, 'status_code': 200}, None)
- self.assertEqual(
- parsed,
- {'ResponseMetadata': {'RequestId': 'request-id',
- 'HostId': 'second-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HostId': 'second-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
def test_metadata_always_exists_on_rest_xml_response(self):
# ResponseMetadata is used for more than just the request id. It
@@ -374,7 +370,7 @@ class TestResponseMetadataParsed(unittest.TestCase):
'HTTPHeaders': headers
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
class TestHeaderResponseInclusion(unittest.TestCase):
@@ -414,8 +410,7 @@ class TestHeaderResponseInclusion(unittest.TestCase):
'header2': 'bar',
}
# Response headers should be mapped as HTTPHeaders.
- self.assertEqual(
- parsed['ResponseMetadata']['HTTPHeaders'], parsed_headers)
+ assert parsed['ResponseMetadata']['HTTPHeaders'] == parsed_headers
def test_can_always_json_serialize_headers(self):
parser = self.create_parser()
@@ -432,8 +427,8 @@ class TestHeaderResponseInclusion(unittest.TestCase):
# We've had the contract that you can JSON serialize a
# response. So we want to ensure that, despite using a CustomHeaderDict,
# we can always json.dumps() the response metadata.
- self.assertEqual(
- json.loads(json.dumps(metadata))['HTTPHeaders']['header1'], 'foo')
+ assert json.loads(json.dumps(metadata))[
+ 'HTTPHeaders']['header1'] == 'foo'
class TestResponseParsingDatetimes(unittest.TestCase):
@@ -451,7 +446,7 @@ class TestResponseParsingDatetimes(unittest.TestCase):
{'body': timestamp_as_float,
'headers': [],
'status_code': 200}, output_shape)
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
class TestResponseParserFactory(unittest.TestCase):
@@ -460,12 +455,12 @@ class TestResponseParserFactory(unittest.TestCase):
def test_rest_parser(self):
parser = self.factory.create_parser('rest-xml')
- self.assertTrue(isinstance(parser, parsers.BaseRestParser))
- self.assertTrue(isinstance(parser, parsers.BaseXMLResponseParser))
+ assert isinstance(parser, parsers.BaseRestParser)
+ assert isinstance(parser, parsers.BaseXMLResponseParser)
def test_json_parser(self):
parser = self.factory.create_parser('json')
- self.assertTrue(isinstance(parser, parsers.BaseJSONParser))
+ assert isinstance(parser, parsers.BaseJSONParser)
class TestCanDecorateResponseParsing(unittest.TestCase):
@@ -487,7 +482,7 @@ class TestCanDecorateResponseParsing(unittest.TestCase):
parsed = parser.parse(
self.create_request_dict(with_body=hello_world_b64),
output_shape)
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_decorate_scalar_parsing(self):
output_shape = model.Shape(shape_name='BlobType',
@@ -503,7 +498,7 @@ class TestCanDecorateResponseParsing(unittest.TestCase):
parsed = parser.parse(
self.create_request_dict(with_body=hello_world_b64),
output_shape)
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_decorate_timestamp_parser(self):
output_shape = model.Shape(shape_name='datetime',
@@ -520,7 +515,7 @@ class TestCanDecorateResponseParsing(unittest.TestCase):
parsed = parser.parse(
self.create_request_dict(with_body=timestamp_as_int),
output_shape)
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
class TestHandlesNoOutputShape(unittest.TestCase):
@@ -533,11 +528,10 @@ class TestHandlesNoOutputShape(unittest.TestCase):
parsed = parser.parse(
{'body': b'', 'headers': headers, 'status_code': 200},
output_shape)
- self.assertEqual(
- parsed,
- {'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
def test_empty_rest_xml_response(self):
# This is the format used by cloudfront, route53.
@@ -547,11 +541,10 @@ class TestHandlesNoOutputShape(unittest.TestCase):
parsed = parser.parse(
{'body': b'', 'headers': headers, 'status_code': 200},
output_shape)
- self.assertEqual(
- parsed,
- {'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
def test_empty_query_response(self):
body = (
@@ -566,11 +559,10 @@ class TestHandlesNoOutputShape(unittest.TestCase):
parsed = parser.parse(
{'body': body, 'headers': {}, 'status_code': 200},
output_shape)
- self.assertEqual(
- parsed,
- {'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': {}}})
+ assert parsed == {
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': {}}}
def test_empty_json_response(self):
headers = {'x-amzn-requestid': 'request-id'}
@@ -580,11 +572,10 @@ class TestHandlesNoOutputShape(unittest.TestCase):
parsed = parser.parse(
{'body': b'', 'headers': headers, 'status_code': 200},
output_shape)
- self.assertEqual(
- parsed,
- {'ResponseMetadata': {'RequestId': 'request-id',
- 'HTTPStatusCode': 200,
- 'HTTPHeaders': headers}})
+ assert parsed == {
+ 'ResponseMetadata': {'RequestId': 'request-id',
+ 'HTTPStatusCode': 200,
+ 'HTTPHeaders': headers}}
class TestHandlesInvalidXMLResponses(unittest.TestCase):
@@ -597,8 +588,8 @@ class TestHandlesInvalidXMLResponses(unittest.TestCase):
parser = parsers.QueryParser()
output_shape = None
# The XML body should be in the error message.
- with six.assertRaisesRegex(self, parsers.ResponseParserError,
- '<DeleteTagsResponse'):
+ with pytest.raises(parsers.ResponseParserError,
+ match='<DeleteTagsResponse'):
parser.parse(
{'body': invalid_xml, 'headers': {}, 'status_code': 200},
output_shape)
@@ -637,7 +628,7 @@ class TestRESTXMLResponses(unittest.TestCase):
{'body': body, 'headers': headers, 'status_code': 200},
output_shape)
# Ensure the first element is used out of the list.
- self.assertEqual(parsed['Foo'], {'Bar': 'first_value'})
+ assert parsed['Foo'] == {'Bar': 'first_value'}
class TestEventStreamParsers(unittest.TestCase):
@@ -753,7 +744,7 @@ class TestEventStreamParsers(unittest.TestCase):
}
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_event_bad_xml(self):
headers = {
@@ -767,19 +758,19 @@ class TestEventStreamParsers(unittest.TestCase):
'Stats': {}
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_event_blob(self):
headers = {':event-type': 'EventB'}
parsed = self.parse_event(headers, b'blob')
expected = {'EventB': {'Body': b'blob'}}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_event_string(self):
headers = {':event-type': 'EventC'}
parsed = self.parse_event(headers, b'blob')
expected = {'EventC': {'Body': u'blob'}}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_payload_implicit(self):
headers = {
@@ -800,7 +791,7 @@ class TestEventStreamParsers(unittest.TestCase):
'IntField': 1234
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_error_event(self):
error_code = 'client/SomeError'
@@ -818,7 +809,7 @@ class TestEventStreamParsers(unittest.TestCase):
'Message': error_message
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_exception_event(self):
self.parser = parsers.EventStreamJSONParser()
@@ -835,7 +826,7 @@ class TestEventStreamParsers(unittest.TestCase):
'Message': 'You did something wrong'
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
def test_parses_event_json(self):
self.parser = parsers.EventStreamJSONParser()
@@ -853,7 +844,7 @@ class TestEventStreamParsers(unittest.TestCase):
'IntField': 1234
}
}
- self.assertEqual(parsed, expected)
+ assert parsed == expected
class TestParseErrorResponses(unittest.TestCase):
@@ -892,12 +883,12 @@ class TestParseErrorResponses(unittest.TestCase):
parsed = parser.parse(response, None)
# Even (especially) on an error condition, the
# ResponseMetadata should be populated.
- self.assertIn('ResponseMetadata', parsed)
- self.assertEqual(parsed['ResponseMetadata']['RequestId'], 'request-id')
+ assert 'ResponseMetadata' in parsed
+ assert parsed['ResponseMetadata']['RequestId'] == 'request-id'
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error']['Message'], 'this is a message')
- self.assertEqual(parsed['Error']['Code'], 'ValidationException')
+ assert 'Error' in parsed
+ assert parsed['Error']['Message'] == 'this is a message'
+ assert parsed['Error']['Code'] == 'ValidationException'
def test_response_metadata_errors_alternate_form_json_protocol(self):
# Sometimes there is no '#' in the __type. We need to be
@@ -914,9 +905,9 @@ class TestParseErrorResponses(unittest.TestCase):
}
}
parsed = parser.parse(response, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error']['Message'], 'this is a message')
- self.assertEqual(parsed['Error']['Code'], 'ValidationException')
+ assert 'Error' in parsed
+ assert parsed['Error']['Message'] == 'this is a message'
+ assert parsed['Error']['Code'] == 'ValidationException'
def test_parse_error_response_for_query_protocol(self):
body = (
@@ -932,12 +923,12 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.QueryParser()
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 400}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': 'InvalidInput',
'Message': 'ARN asdf is not valid.',
'Type': 'Sender',
- })
+ }
def test_can_parse_sdb_error_response_query_protocol(self):
body = (
@@ -954,16 +945,16 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.QueryParser()
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 500}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': '1',
'Message': 'msg'
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'RequestId': 'abc-123',
'HTTPStatusCode': 500,
'HTTPHeaders': {}
- })
+ }
def test_can_parser_ec2_errors(self):
body = (
@@ -980,11 +971,11 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.EC2QueryParser()
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 400}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': 'InvalidInstanceID.NotFound',
'Message': 'The instance ID i-12345 does not exist',
- })
+ }
def test_can_parse_rest_xml_errors(self):
body = (
@@ -1000,12 +991,12 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.RestXMLParser()
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 400}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': 'NoSuchHostedZone',
'Message': 'No hosted zone found with ID: foobar',
'Type': 'Sender',
- })
+ }
def test_can_parse_rest_json_errors(self):
body = (
@@ -1018,11 +1009,11 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.RestJSONParser()
parsed = parser.parse({
'body': body, 'headers': headers, 'status_code': 400}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': 'ResourceNotFoundException',
'Message': 'Function not found: foo',
- })
+ }
def test_error_response_with_no_body_rest_json(self):
parser = parsers.RestJSONParser()
@@ -1032,15 +1023,15 @@ class TestParseErrorResponses(unittest.TestCase):
parsed = parser.parse({'body': response, 'headers': headers,
'status_code': 504}, output_shape)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': '504',
'Message': 'Gateway Timeout'
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'HTTPStatusCode': 504,
'HTTPHeaders': headers
- })
+ }
def test_error_response_with_string_body_rest_json(self):
parser = parsers.RestJSONParser()
@@ -1050,15 +1041,15 @@ class TestParseErrorResponses(unittest.TestCase):
parsed = parser.parse({'body': response, 'headers': headers,
'status_code': 413}, output_shape)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': '413',
'Message': response.decode('utf-8')
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'HTTPStatusCode': 413,
'HTTPHeaders': headers
- })
+ }
def test_error_response_with_xml_body_rest_json(self):
parser = parsers.RestJSONParser()
@@ -1072,15 +1063,15 @@ class TestParseErrorResponses(unittest.TestCase):
parsed = parser.parse({'body': response, 'headers': headers,
'status_code': 403}, output_shape)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': '403',
'Message': response.decode('utf-8')
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'HTTPStatusCode': 403,
'HTTPHeaders': headers
- })
+ }
def test_s3_error_response(self):
body = (
@@ -1099,20 +1090,20 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.RestXMLParser()
parsed = parser.parse(
{'body': body, 'headers': headers, 'status_code': 400}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': 'NoSuchBucket',
'Message': 'error message',
'BucketName': 'asdf',
# We don't want the RequestId/HostId because they're already
# present in the ResponseMetadata key.
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'RequestId': 'request-id',
'HostId': 'second-id',
'HTTPStatusCode': 400,
'HTTPHeaders': headers
- })
+ }
def test_s3_error_response_with_no_body(self):
# If you try to HeadObject a key that does not exist,
@@ -1127,29 +1118,29 @@ class TestParseErrorResponses(unittest.TestCase):
parser = parsers.RestXMLParser()
parsed = parser.parse(
{'body': body, 'headers': headers, 'status_code': 404}, None)
- self.assertIn('Error', parsed)
- self.assertEqual(parsed['Error'], {
+ assert 'Error' in parsed
+ assert parsed['Error'] == {
'Code': '404',
'Message': 'Not Found',
- })
- self.assertEqual(parsed['ResponseMetadata'], {
+ }
+ assert parsed['ResponseMetadata'] == {
'RequestId': 'request-id',
'HostId': 'second-id',
'HTTPStatusCode': 404,
'HTTPHeaders': headers
- })
+ }
def test_can_parse_glacier_error_response(self):
body = (b'{"code":"AccessDeniedException","type":"Client","message":'
b'"Access denied"}')
headers = {
- 'x-amzn-requestid': 'request-id'
+ 'x-amzn-requestid': 'request-id'
}
parser = parsers.RestJSONParser()
parsed = parser.parse(
{'body': body, 'headers': headers, 'status_code': 400}, None)
- self.assertEqual(parsed['Error'], {'Message': 'Access denied',
- 'Code': 'AccessDeniedException'})
+ assert parsed['Error'] == {'Message': 'Access denied',
+ 'Code': 'AccessDeniedException'}
def test_can_parse_restjson_error_code(self):
body = b'''{
@@ -1160,25 +1151,25 @@ class TestParseErrorResponses(unittest.TestCase):
"message": "blah",
"deletes": 0}'''
headers = {
- 'x-amzn-requestid': 'request-id'
+ 'x-amzn-requestid': 'request-id'
}
parser = parsers.RestJSONParser()
parsed = parser.parse(
{'body': body, 'headers': headers, 'status_code': 400}, None)
- self.assertEqual(parsed['Error'], {'Message': 'blah',
- 'Code': 'WasUnableToParseThis'})
+ assert parsed['Error'] == {'Message': 'blah',
+ 'Code': 'WasUnableToParseThis'}
def test_can_parse_with_case_insensitive_keys(self):
body = (b'{"Code":"AccessDeniedException","type":"Client","Message":'
b'"Access denied"}')
headers = {
- 'x-amzn-requestid': 'request-id'
+ 'x-amzn-requestid': 'request-id'
}
parser = parsers.RestJSONParser()
parsed = parser.parse(
{'body': body, 'headers': headers, 'status_code': 400}, None)
- self.assertEqual(parsed['Error'], {'Message': 'Access denied',
- 'Code': 'AccessDeniedException'})
+ assert parsed['Error'] == {'Message': 'Access denied',
+ 'Code': 'AccessDeniedException'}
def test_can_parse_rest_json_modeled_fields(self):
body = (
@@ -1195,7 +1186,7 @@ class TestParseErrorResponses(unittest.TestCase):
expected_parsed = {
'ModeledField': 'Some modeled field',
}
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_parse_rest_xml_modeled_fields(self):
parser = parsers.RestXMLParser()
@@ -1216,7 +1207,7 @@ class TestParseErrorResponses(unittest.TestCase):
expected_parsed = {
'ModeledField': 'Some modeled field',
}
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_parse_ec2_modeled_fields(self):
body = (
@@ -1236,7 +1227,7 @@ class TestParseErrorResponses(unittest.TestCase):
expected_parsed = {
'ModeledField': 'Some modeled field',
}
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_parse_query_modeled_fields(self):
parser = parsers.QueryParser()
@@ -1257,7 +1248,7 @@ class TestParseErrorResponses(unittest.TestCase):
expected_parsed = {
'ModeledField': 'Some modeled field',
}
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_parse_json_modeled_fields(self):
body = (
@@ -1275,14 +1266,14 @@ class TestParseErrorResponses(unittest.TestCase):
expected_parsed = {
'ModeledField': 'Some modeled field',
}
- self.assertEqual(parsed, expected_parsed)
+ assert parsed == expected_parsed
def test_can_parse_route53_with_missing_message(self):
# The message isn't always in the XML response (or even the headers).
# We should be able to handle this gracefully and still at least
# populate a "Message" key so that consumers don't have to
# conditionally check for this.
- body = (
+ body = (
'<ErrorResponse>'
' <Error>'
' <Type>Sender</Type>'
@@ -1295,17 +1286,17 @@ class TestParseErrorResponses(unittest.TestCase):
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 400}, None)
error = parsed['Error']
- self.assertEqual(error['Code'], 'InvalidInput')
+ assert error['Code'] == 'InvalidInput'
# Even though there's no <Message /> we should
# still populate an empty string.
- self.assertEqual(error['Message'], '')
+ assert error['Message'] == ''
def test_can_handle_generic_error_message():
# There are times when you can get a service to respond with a generic
# html error page. We should be able to handle this case.
for parser_cls in parsers.PROTOCOL_PARSERS.values():
- generic_html_body = (
+ generic_html_body = (
'<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
).encode('utf-8')
empty_body = b''
@@ -1320,6 +1311,6 @@ def _assert_parses_generic_error(parser, body):
# html error page. We should be able to handle this case.
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 503}, None)
- assert parsed['Error'] == \
- {'Code': '503', 'Message': 'Service Unavailable'}
+ assert parsed['Error'] == {
+ 'Code': '503', 'Message': 'Service Unavailable'}
assert parsed['ResponseMetadata']['HTTPStatusCode'] == 503
diff --git a/tests/unit/test_regions.py b/tests/unit/test_regions.py
index 65987407..f6bc02fa 100644
--- a/tests/unit/test_regions.py
+++ b/tests/unit/test_regions.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
from botocore import regions
from botocore.exceptions import NoRegionError
@@ -113,108 +114,107 @@ class TestEndpointResolver(unittest.TestCase):
}
def test_ensures_region_is_not_none(self):
- with self.assertRaises(NoRegionError):
+ with pytest.raises(NoRegionError):
resolver = regions.EndpointResolver(self._template())
resolver.construct_endpoint('foo', None)
def test_ensures_required_keys_present(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
regions.EndpointResolver({})
def test_returns_empty_list_when_listing_for_different_partition(self):
resolver = regions.EndpointResolver(self._template())
- self.assertEqual([], resolver.get_available_endpoints('ec2', 'bar'))
+ assert resolver.get_available_endpoints('ec2', 'bar') == []
def test_returns_empty_list_when_no_service_found(self):
resolver = regions.EndpointResolver(self._template())
- self.assertEqual([], resolver.get_available_endpoints('what?'))
+ assert resolver.get_available_endpoints('what?') == []
def test_gets_endpoint_names(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=True)
- self.assertEqual(['d', 'eu-baz', 'us-bar', 'us-foo'], sorted(result))
+ assert sorted(result) == ['d', 'eu-baz', 'us-bar', 'us-foo']
def test_gets_endpoint_names_for_partition(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=True, partition_name='foo')
- self.assertEqual(['foo-1', 'foo-2', 'foo-3'], sorted(result))
+ assert sorted(result) == ['foo-1', 'foo-2', 'foo-3']
def test_list_regional_endpoints_only(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=False)
- self.assertEqual(['eu-baz', 'us-bar', 'us-foo'], sorted(result))
+ assert sorted(result) == ['eu-baz', 'us-bar', 'us-foo']
def test_returns_none_when_no_match(self):
resolver = regions.EndpointResolver(self._template())
- self.assertIsNone(resolver.construct_endpoint('foo', 'baz'))
+ assert resolver.construct_endpoint('foo', 'baz') is None
def test_constructs_regionalized_endpoints_for_exact_matches(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'eu-baz')
- self.assertEqual('not-regionalized.eu-baz.amazonaws.com',
- result['hostname'])
- self.assertEqual('aws', result['partition'])
- self.assertEqual('eu-baz', result['endpointName'])
+ assert result['hostname'] == 'not-regionalized.eu-baz.amazonaws.com'
+ assert result['partition'] == 'aws'
+ assert result['endpointName'] == 'eu-baz'
def test_constructs_partition_endpoints_for_real_partition_region(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'us-bar')
- self.assertEqual('not-regionalized', result['hostname'])
- self.assertEqual('aws', result['partition'])
- self.assertEqual('aws', result['endpointName'])
+ assert result['hostname'] == 'not-regionalized'
+ assert result['partition'] == 'aws'
+ assert result['endpointName'] == 'aws'
def test_constructs_partition_endpoints_for_regex_match(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'us-abc')
- self.assertEqual('not-regionalized', result['hostname'])
+ assert result['hostname'] == 'not-regionalized'
def test_constructs_endpoints_for_regionalized_regex_match(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'us-abc')
- self.assertEqual('s3.us-abc.amazonaws.com', result['hostname'])
+ assert result['hostname'] == 's3.us-abc.amazonaws.com'
def test_constructs_endpoints_for_unknown_service_but_known_region(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('unknown', 'us-foo')
- self.assertEqual('unknown.us-foo.amazonaws.com', result['hostname'])
+ assert result['hostname'] == 'unknown.us-foo.amazonaws.com'
def test_merges_service_keys(self):
resolver = regions.EndpointResolver(self._template())
us_foo = resolver.construct_endpoint('merge', 'us-foo')
us_bar = resolver.construct_endpoint('merge', 'us-bar')
- self.assertEqual(['http'], us_foo['protocols'])
- self.assertEqual(['v4'], us_foo['signatureVersions'])
- self.assertEqual(['https'], us_bar['protocols'])
- self.assertEqual(['v2'], us_bar['signatureVersions'])
+ assert us_foo['protocols'] == ['http']
+ assert us_foo['signatureVersions'] == ['v4']
+ assert us_bar['protocols'] == ['https']
+ assert us_bar['signatureVersions'] == ['v2']
def test_merges_partition_default_keys_with_no_overwrite(self):
resolver = regions.EndpointResolver(self._template())
resolved = resolver.construct_endpoint('ec2', 'foo-1')
- self.assertEqual('baz', resolved['foo'])
- self.assertEqual(['http'], resolved['protocols'])
+ assert resolved['foo'] == 'baz'
+ assert resolved['protocols'] == ['http']
def test_merges_partition_default_keys_with_overwrite(self):
resolver = regions.EndpointResolver(self._template())
resolved = resolver.construct_endpoint('ec2', 'foo-2')
- self.assertEqual('bar', resolved['foo'])
- self.assertEqual(['http'], resolved['protocols'])
+ assert resolved['foo'] == 'bar'
+ assert resolved['protocols'] == ['http']
def test_gives_hostname_and_common_name_unaltered(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'eu-baz')
- self.assertEqual('s3.eu-baz.amazonaws.com', result['sslCommonName'])
- self.assertEqual('foo', result['hostname'])
+ assert result['sslCommonName'] == 's3.eu-baz.amazonaws.com'
+ assert result['hostname'] == 'foo'
def tests_uses_partition_endpoint_when_no_region_provided(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized')
- self.assertEqual('not-regionalized', result['hostname'])
- self.assertEqual('aws', result['endpointName'])
+ assert result['hostname'] == 'not-regionalized'
+ assert result['endpointName'] == 'aws'
def test_returns_dns_suffix_if_available(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized')
- self.assertEqual(result['dnsSuffix'], 'amazonaws.com')
+ assert result['dnsSuffix'] == 'amazonaws.com'
diff --git a/tests/unit/test_response.py b/tests/unit/test_response.py
index d3531162..d333595a 100644
--- a/tests/unit/test_response.py
+++ b/tests/unit/test_response.py
@@ -13,6 +13,7 @@
from tests import unittest
from tests.unit import BaseResponseTest
import datetime
+import pytest
from dateutil.tz import tzutc
from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError
@@ -45,24 +46,21 @@ class TestStreamWrapper(unittest.TestCase):
def assert_lines(self, line_iterator, expected_lines):
for expected_line in expected_lines:
- self.assertEqual(
- next(line_iterator),
- expected_line,
- )
+ assert next(line_iterator) == expected_line
# We should have exhausted the iterator.
- with self.assertRaises(StopIteration):
+ with pytest.raises(StopIteration):
next(line_iterator)
def test_streaming_wrapper_validates_content_length(self):
body = six.BytesIO(b'1234567890')
stream = response.StreamingBody(body, content_length=10)
- self.assertEqual(stream.read(), b'1234567890')
+ assert stream.read() == b'1234567890'
def test_streaming_body_with_invalid_length(self):
body = six.BytesIO(b'123456789')
stream = response.StreamingBody(body, content_length=10)
- with self.assertRaises(IncompleteReadError):
- self.assertEqual(stream.read(9), b'123456789')
+ with pytest.raises(IncompleteReadError):
+ assert stream.read(9) == b'123456789'
# The next read will have nothing returned and raise
# an IncompleteReadError because we expected 10 bytes, not 9.
stream.read()
@@ -71,55 +69,55 @@ class TestStreamWrapper(unittest.TestCase):
body = six.BytesIO(b'1234567890')
stream = response.StreamingBody(body, content_length=10)
chunk = stream.read(0)
- self.assertEqual(chunk, b'')
- self.assertEqual(stream.read(), b'1234567890')
+ assert chunk == b''
+ assert stream.read() == b'1234567890'
def test_streaming_body_with_single_read(self):
body = six.BytesIO(b'123456789')
stream = response.StreamingBody(body, content_length=10)
- with self.assertRaises(IncompleteReadError):
+ with pytest.raises(IncompleteReadError):
stream.read()
def test_streaming_body_closes(self):
body = six.BytesIO(b'1234567890')
stream = response.StreamingBody(body, content_length=10)
- self.assertFalse(body.closed)
+ assert not body.closed
stream.close()
- self.assertTrue(body.closed)
+ assert body.closed
def test_default_iter_behavior(self):
body = six.BytesIO(b'a' * 2048)
stream = response.StreamingBody(body, content_length=2048)
chunks = list(stream)
- self.assertEqual(len(chunks), 2)
- self.assertEqual(chunks, [b'a' * 1024, b'a' * 1024])
+ assert len(chunks) == 2
+ assert chunks == [b'a' * 1024, b'a' * 1024]
def test_streaming_body_is_an_iterator(self):
body = six.BytesIO(b'a' * 1024 + b'b' * 1024 + b'c' * 2)
stream = response.StreamingBody(body, content_length=2050)
- self.assertEqual(b'a' * 1024, next(stream))
- self.assertEqual(b'b' * 1024, next(stream))
- self.assertEqual(b'c' * 2, next(stream))
- with self.assertRaises(StopIteration):
+ assert b'a' * 1024 == next(stream)
+ assert b'b' * 1024 == next(stream)
+ assert b'c' * 2 == next(stream)
+ with pytest.raises(StopIteration):
next(stream)
def test_iter_chunks_single_byte(self):
body = six.BytesIO(b'abcde')
stream = response.StreamingBody(body, content_length=5)
chunks = list(stream.iter_chunks(chunk_size=1))
- self.assertEqual(chunks, [b'a', b'b', b'c', b'd', b'e'])
+ assert chunks == [b'a', b'b', b'c', b'd', b'e']
def test_iter_chunks_with_leftover(self):
body = six.BytesIO(b'abcde')
stream = response.StreamingBody(body, content_length=5)
chunks = list(stream.iter_chunks(chunk_size=2))
- self.assertEqual(chunks, [b'ab', b'cd', b'e'])
+ assert chunks == [b'ab', b'cd', b'e']
def test_iter_chunks_single_chunk(self):
body = six.BytesIO(b'abcde')
stream = response.StreamingBody(body, content_length=5)
chunks = list(stream.iter_chunks(chunk_size=1024))
- self.assertEqual(chunks, [b'abcde'])
+ assert chunks == [b'abcde']
def test_streaming_line_iterator(self):
body = six.BytesIO(b'1234567890\n1234567890\n12345')
@@ -163,7 +161,7 @@ class TestStreamWrapper(unittest.TestCase):
return 'http://example.com'
stream = response.StreamingBody(TimeoutBody(), content_length=None)
- with self.assertRaises(ReadTimeoutError):
+ with pytest.raises(ReadTimeoutError):
stream.read()
def test_streaming_line_abstruse_newline_standard(self):
@@ -211,9 +209,8 @@ class TestGetResponse(BaseResponseTest):
operation_model = service_model.operation_model('GetObject')
res = response.get_response(operation_model, http_response)
- self.assertTrue(isinstance(res[1]['Body'], response.StreamingBody))
- self.assertEqual(res[1]['ETag'],
- '"00000000000000000000000000000000"')
+ assert isinstance(res[1]['Body'], response.StreamingBody)
+ assert res[1]['ETag'] == '"00000000000000000000000000000000"'
def test_get_response_streaming_ng(self):
headers = {
diff --git a/tests/unit/test_retryhandler.py b/tests/unit/test_retryhandler.py
index 9abce0fd..c323fce5 100644
--- a/tests/unit/test_retryhandler.py
+++ b/tests/unit/test_retryhandler.py
@@ -14,6 +14,7 @@
# language governing permissions and limitations under the License.
from tests import unittest
+import pytest
from tests import mock
@@ -36,15 +37,15 @@ HTTP_200_RESPONSE.status_code = 200
class TestRetryCheckers(unittest.TestCase):
def assert_should_be_retried(self, response, attempt_number=1,
caught_exception=None):
- self.assertTrue(self.checker(
+ assert self.checker(
response=response, attempt_number=attempt_number,
- caught_exception=caught_exception))
+ caught_exception=caught_exception)
def assert_should_not_be_retried(self, response, attempt_number=1,
caught_exception=None):
- self.assertFalse(self.checker(
+ assert not self.checker(
response=response, attempt_number=attempt_number,
- caught_exception=caught_exception))
+ caught_exception=caught_exception)
def test_status_code_checker(self):
self.checker = retryhandler.HTTPStatusCodeChecker(500)
@@ -64,7 +65,7 @@ class TestRetryCheckers(unittest.TestCase):
# max attempts so we should return False.
self.assert_should_not_be_retried(
(HTTP_500_RESPONSE, response), attempt_number=3)
- self.assertTrue(response['ResponseMetadata']['MaxAttemptsReached'])
+ assert response['ResponseMetadata']['MaxAttemptsReached']
def test_max_attempts_successful(self):
self.checker = retryhandler.MaxAttemptsDecorator(
@@ -122,7 +123,7 @@ class TestRetryCheckers(unittest.TestCase):
def test_value_error_raised_when_missing_response_and_exception(self):
self.checker = retryhandler.ExceptionRaiser()
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.checker(1, response=None, caught_exception=None)
@@ -172,47 +173,47 @@ class TestCreateRetryConfiguration(unittest.TestCase):
def test_create_retry_single_checker_service_level(self):
checker = retryhandler.create_checker_from_retry_config(
self.retry_config, operation_name=None)
- self.assertIsInstance(checker, retryhandler.MaxAttemptsDecorator)
+ assert isinstance(checker, retryhandler.MaxAttemptsDecorator)
# We're reaching into internal fields here, but only to check
# that the object is created properly.
- self.assertEqual(checker._max_attempts, 5)
- self.assertIsInstance(checker._checker,
+ assert checker._max_attempts == 5
+ assert isinstance(checker._checker,
retryhandler.ServiceErrorCodeChecker)
- self.assertEqual(checker._checker._error_code, 'Throttling')
- self.assertEqual(checker._checker._status_code, 400)
+ assert checker._checker._error_code == 'Throttling'
+ assert checker._checker._status_code == 400
def test_create_retry_for_operation(self):
checker = retryhandler.create_checker_from_retry_config(
self.retry_config, operation_name='OperationFoo')
- self.assertIsInstance(checker, retryhandler.MaxAttemptsDecorator)
- self.assertEqual(checker._max_attempts, 5)
- self.assertIsInstance(checker._checker,
+ assert isinstance(checker, retryhandler.MaxAttemptsDecorator)
+ assert checker._max_attempts == 5
+ assert isinstance(checker._checker,
retryhandler.MultiChecker)
def test_retry_with_socket_errors(self):
checker = retryhandler.create_checker_from_retry_config(
self.retry_config, operation_name='OperationBar')
- self.assertIsInstance(checker, retryhandler.BaseChecker)
+ assert isinstance(checker, retryhandler.BaseChecker)
all_checkers = checker._checker._checkers
- self.assertIsInstance(all_checkers[0],
+ assert isinstance(all_checkers[0],
retryhandler.ServiceErrorCodeChecker)
- self.assertIsInstance(all_checkers[1],
+ assert isinstance(all_checkers[1],
retryhandler.ExceptionRaiser)
def test_create_retry_handler_with_socket_errors(self):
handler = retryhandler.create_retry_handler(
self.retry_config, operation_name='OperationBar')
exception = EndpointConnectionError(endpoint_url='')
- with self.assertRaises(EndpointConnectionError):
+ with pytest.raises(EndpointConnectionError):
handler(response=None, attempts=10,
caught_exception=exception)
# No connection error raised because attempts < max_attempts.
sleep_time = handler(response=None, attempts=1,
caught_exception=exception)
- self.assertEqual(sleep_time, 1)
+ assert sleep_time == 1
# But any other exception should be raised even if
# attempts < max_attempts.
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
sleep_time = handler(response=None, attempts=1,
caught_exception=ValueError())
@@ -223,16 +224,16 @@ class TestCreateRetryConfiguration(unittest.TestCase):
self.retry_config, operation_name='OperationBar')
sleep_time = handler(response=None, attempts=1,
caught_exception=ReadTimeoutError(endpoint_url=''))
- self.assertEqual(sleep_time, 1)
+ assert sleep_time == 1
def test_create_retry_handler_with_no_operation(self):
handler = retryhandler.create_retry_handler(
self.retry_config, operation_name=None)
- self.assertIsInstance(handler, retryhandler.RetryHandler)
+ assert isinstance(handler, retryhandler.RetryHandler)
# There's no good way to test the delay function used as the action
# other than to just invoke it.
- self.assertEqual(handler._action(attempts=2), 2)
- self.assertEqual(handler._action(attempts=3), 4)
+ assert handler._action(attempts=2) == 2
+ assert handler._action(attempts=3) == 4
def test_crc32_check_propogates_error(self):
handler = retryhandler.create_retry_handler(
@@ -244,9 +245,9 @@ class TestCreateRetryConfiguration(unittest.TestCase):
http_response.headers = {'x-amz-crc32': 2356372768}
http_response.content = b'foo'
# For the first 10 attempts we get a retry.
- self.assertEqual(handler(response=(http_response, {}), attempts=1,
- caught_exception=None), 1)
- with self.assertRaises(ChecksumError):
+ assert handler(response=(http_response, {}), attempts=1,
+ caught_exception=None) == 1
+ with pytest.raises(ChecksumError):
handler(response=(http_response, {}), attempts=10,
caught_exception=None)
@@ -260,14 +261,10 @@ class TestRetryHandler(unittest.TestCase):
handler = retryhandler.RetryHandler(checker, delay_function)
response = (HTTP_500_RESPONSE, {})
- self.assertEqual(
- handler(response=response, attempts=1, caught_exception=None), 1)
- self.assertEqual(
- handler(response=response, attempts=2, caught_exception=None), 2)
- self.assertEqual(
- handler(response=response, attempts=3, caught_exception=None), 4)
- self.assertEqual(
- handler(response=response, attempts=4, caught_exception=None), 8)
+ assert handler(response=response, attempts=1, caught_exception=None) == 1
+ assert handler(response=response, attempts=2, caught_exception=None) == 2
+ assert handler(response=response, attempts=3, caught_exception=None) == 4
+ assert handler(response=response, attempts=4, caught_exception=None) == 8
def test_none_response_when_no_matches(self):
delay_function = retryhandler.create_exponential_delay_function(1, 2)
@@ -275,8 +272,7 @@ class TestRetryHandler(unittest.TestCase):
handler = retryhandler.RetryHandler(checker, delay_function)
response = (HTTP_200_RESPONSE, {})
- self.assertIsNone(handler(response=response, attempts=1,
- caught_exception=None))
+ assert handler(response=response, attempts=1, caught_exception=None) is None
class TestCRC32Checker(unittest.TestCase):
@@ -290,18 +286,18 @@ class TestCRC32Checker(unittest.TestCase):
# pass the crc32 check.
http_response.headers = {'x-amz-crc32': 2356372769}
http_response.content = b'foo'
- self.assertIsNone(self.checker(
+ assert self.checker(
response=(http_response, {}), attempt_number=1,
- caught_exception=None))
+ caught_exception=None) is None
def test_crc32_missing(self):
# It's not an error if the crc32 header is missing.
http_response = mock.Mock()
http_response.status_code = 200
http_response.headers = {}
- self.assertIsNone(self.checker(
+ assert self.checker(
response=(http_response, {}), attempt_number=1,
- caught_exception=None))
+ caught_exception=None) is None
def test_crc32_check_fails(self):
http_response = mock.Mock()
@@ -310,26 +306,26 @@ class TestCRC32Checker(unittest.TestCase):
# fail the crc32 check.
http_response.headers = {'x-amz-crc32': 2356372768}
http_response.content = b'foo'
- with self.assertRaises(ChecksumError):
+ with pytest.raises(ChecksumError):
self.checker(response=(http_response, {}), attempt_number=1,
caught_exception=None)
class TestDelayExponential(unittest.TestCase):
def test_delay_with_numeric_base(self):
- self.assertEqual(retryhandler.delay_exponential(base=3,
+ assert retryhandler.delay_exponential(base=3,
growth_factor=2,
- attempts=3), 12)
+ attempts=3) == 12
def test_delay_with_rand_string(self):
delay = retryhandler.delay_exponential(base='rand',
growth_factor=2,
attempts=3)
# 2 ** (3 - 1) == 4, so the delay is between 0 and 4.
- self.assertTrue(0 <= delay <= 4)
+ assert 0 <= delay <= 4
def test_value_error_raised_with_non_positive_number(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
retryhandler.delay_exponential(
base=-1, growth_factor=2, attempts=3)
diff --git a/tests/unit/test_s3_addressing.py b/tests/unit/test_s3_addressing.py
index 4dbff04b..d0bdd68b 100644
--- a/tests/unit/test_s3_addressing.py
+++ b/tests/unit/test_s3_addressing.py
@@ -14,6 +14,7 @@
# language governing permissions and limitations under the License.
import os
+import pytest
from tests import BaseSessionTest, ClientHTTPStubber
@@ -49,58 +50,48 @@ class TestS3Addressing(BaseSessionTest):
params = {'Bucket': 'safename'}
prepared_request = self.get_prepared_request('list_objects', params,
force_hmacv1=True)
- self.assertEqual(prepared_request.url,
- 'https://safename.s3.amazonaws.com/')
+ assert prepared_request.url == 'https://safename.s3.amazonaws.com/'
def test_list_objects_non_dns_name(self):
params = {'Bucket': 'un_safe_name'}
prepared_request = self.get_prepared_request('list_objects', params,
force_hmacv1=True)
- self.assertEqual(prepared_request.url,
- 'https://s3.amazonaws.com/un_safe_name')
+ assert prepared_request.url == 'https://s3.amazonaws.com/un_safe_name'
def test_list_objects_dns_name_non_classic(self):
self.region_name = 'us-west-2'
params = {'Bucket': 'safename'}
prepared_request = self.get_prepared_request('list_objects', params,
force_hmacv1=True)
- self.assertEqual(prepared_request.url,
- 'https://safename.s3.us-west-2.amazonaws.com/')
+ assert prepared_request.url == 'https://safename.s3.us-west-2.amazonaws.com/'
def test_list_objects_unicode_query_string_eu_central_1(self):
self.region_name = 'eu-central-1'
params = OrderedDict([('Bucket', 'safename'),
('Marker', u'\xe4\xf6\xfc-01.txt')])
prepared_request = self.get_prepared_request('list_objects', params)
- self.assertEqual(
- prepared_request.url,
- ('https://safename.s3.eu-central-1.amazonaws.com/'
- '?marker=%C3%A4%C3%B6%C3%BC-01.txt')
- )
+ assert prepared_request.url == ('https://safename.s3.eu-central-1.amazonaws.com/'
+ '?marker=%C3%A4%C3%B6%C3%BC-01.txt')
def test_list_objects_in_restricted_regions(self):
self.region_name = 'us-gov-west-1'
params = {'Bucket': 'safename'}
prepared_request = self.get_prepared_request('list_objects', params)
# Note how we keep the region specific endpoint here.
- self.assertEqual(prepared_request.url,
- 'https://safename.s3.us-gov-west-1.amazonaws.com/')
+ assert prepared_request.url == 'https://safename.s3.us-gov-west-1.amazonaws.com/'
def test_list_objects_in_fips(self):
self.region_name = 'fips-us-gov-west-1'
params = {'Bucket': 'safename'}
prepared_request = self.get_prepared_request('list_objects', params)
# Note how we keep the region specific endpoint here.
- self.assertEqual(
- prepared_request.url,
- 'https://safename.s3-fips.us-gov-west-1.amazonaws.com/')
+ assert prepared_request.url == 'https://safename.s3-fips.us-gov-west-1.amazonaws.com/'
def test_list_objects_non_dns_name_non_classic(self):
self.region_name = 'us-west-2'
params = {'Bucket': 'un_safe_name'}
prepared_request = self.get_prepared_request('list_objects', params)
- self.assertEqual(prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/un_safe_name')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/un_safe_name'
def test_put_object_dns_name_non_classic(self):
self.region_name = 'us-west-2'
@@ -116,9 +107,7 @@ class TestS3Addressing(BaseSessionTest):
'ContentType': 'text/plain'
}
prepared_request = self.get_prepared_request('put_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/my.valid.name/mykeyname')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/my.valid.name/mykeyname'
def test_put_object_dns_name_classic(self):
self.region_name = 'us-east-1'
@@ -134,9 +123,7 @@ class TestS3Addressing(BaseSessionTest):
'ContentType': 'text/plain'
}
prepared_request = self.get_prepared_request('put_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.amazonaws.com/my.valid.name/mykeyname')
+ assert prepared_request.url == 'https://s3.amazonaws.com/my.valid.name/mykeyname'
def test_put_object_dns_name_single_letter_non_classic(self):
self.region_name = 'us-west-2'
@@ -152,9 +139,7 @@ class TestS3Addressing(BaseSessionTest):
'ContentType': 'text/plain'
}
prepared_request = self.get_prepared_request('put_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/a.valid.name/mykeyname')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/a.valid.name/mykeyname'
def test_get_object_non_dns_name_non_classic(self):
self.region_name = 'us-west-2'
@@ -163,9 +148,7 @@ class TestS3Addressing(BaseSessionTest):
'Key': 'mykeyname'
}
prepared_request = self.get_prepared_request('get_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/AnInvalidName/mykeyname')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/AnInvalidName/mykeyname'
def test_get_object_non_dns_name_classic(self):
self.region_name = 'us-east-1'
@@ -174,8 +157,7 @@ class TestS3Addressing(BaseSessionTest):
'Key': 'mykeyname'
}
prepared_request = self.get_prepared_request('get_object', params)
- self.assertEqual(prepared_request.url,
- 'https://s3.amazonaws.com/AnInvalidName/mykeyname')
+ assert prepared_request.url == 'https://s3.amazonaws.com/AnInvalidName/mykeyname'
def test_get_object_ip_address_name_non_classic(self):
self.region_name = 'us-west-2'
@@ -183,9 +165,7 @@ class TestS3Addressing(BaseSessionTest):
'Bucket': '192.168.5.4',
'Key': 'mykeyname'}
prepared_request = self.get_prepared_request('get_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/192.168.5.4/mykeyname')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/192.168.5.4/mykeyname'
def test_get_object_almost_an_ip_address_name_non_classic(self):
self.region_name = 'us-west-2'
@@ -193,12 +173,10 @@ class TestS3Addressing(BaseSessionTest):
'Bucket': '192.168.5.256',
'Key': 'mykeyname'}
prepared_request = self.get_prepared_request('get_object', params)
- self.assertEqual(
- prepared_request.url,
- 'https://s3.us-west-2.amazonaws.com/192.168.5.256/mykeyname')
+ assert prepared_request.url == 'https://s3.us-west-2.amazonaws.com/192.168.5.256/mykeyname'
def test_invalid_endpoint_raises_exception(self):
- with six.assertRaisesRegex(self, ValueError, 'Invalid region'):
+ with pytest.raises(ValueError, match='Invalid region'):
self.session.create_client('s3', 'Invalid region')
def test_non_existent_region(self):
@@ -207,7 +185,7 @@ class TestS3Addressing(BaseSessionTest):
client = self.session.create_client('s3', 'us-west-111')
# Then the default endpoint heuristic will apply and we'll
# get the region name as specified.
- self.assertEqual(client.meta.region_name, 'us-west-111')
+ assert client.meta.region_name == 'us-west-111'
# Why not fix this? Well, backwards compatibility for one thing.
# The other reason is that it was intended to accommodate this
# use case. Let's say I have us-west-2 set as my default region,
@@ -216,8 +194,8 @@ class TestS3Addressing(BaseSessionTest):
client = self.session.create_client('iam', 'us-west-2')
# Instead of giving the user an error, we should instead give
# them the partition-global endpoint.
- self.assertEqual(client.meta.region_name, 'aws-global')
+ assert client.meta.region_name == 'aws-global'
# But if they request an endpoint that we *do* know about, we use
# that specific endpoint.
client = self.session.create_client('iam', 'aws-us-gov-global')
- self.assertEqual(client.meta.region_name, 'aws-us-gov-global')
+ assert client.meta.region_name == 'aws-us-gov-global'
diff --git a/tests/unit/test_serialize.py b/tests/unit/test_serialize.py
index 4f7cbe9b..9094247c 100644
--- a/tests/unit/test_serialize.py
+++ b/tests/unit/test_serialize.py
@@ -15,6 +15,7 @@ import base64
import json
import datetime
import dateutil.tz
+import pytest
from tests import unittest
from botocore.model import ServiceModel
@@ -67,7 +68,7 @@ class BaseModelWithBlob(unittest.TestCase):
# as str types so we need to decode back. We know that this is
# ascii so it's safe to use the ascii encoding.
expected = encoded.decode('ascii')
- self.assertEqual(request['body']['Blob'], expected)
+ assert request['body']['Blob'] == expected
class TestBinaryTypes(BaseModelWithBlob):
@@ -106,9 +107,7 @@ class TestBinaryTypesJSON(BaseModelWithBlob):
body = b'bytes body'
request = self.serialize_to_request(input_params={'Blob': body})
serialized_blob = json.loads(request['body'].decode('utf-8'))['Blob']
- self.assertEqual(
- base64.b64encode(body).decode('ascii'),
- serialized_blob)
+ assert base64.b64encode(body).decode('ascii') == serialized_blob
class TestBinaryTypesWithRestXML(BaseModelWithBlob):
@@ -128,19 +127,19 @@ class TestBinaryTypesWithRestXML(BaseModelWithBlob):
def test_blob_serialization_with_file_like_object(self):
body = six.BytesIO(b'foobar')
request = self.serialize_to_request(input_params={'Blob': body})
- self.assertEqual(request['body'], body)
+ assert request['body'] == body
def test_blob_serialization_when_payload_is_unicode(self):
# When the body is a text type, we should encode the
# text to bytes.
body = u'\u2713'
request = self.serialize_to_request(input_params={'Blob': body})
- self.assertEqual(request['body'], body.encode('utf-8'))
+ assert request['body'] == body.encode('utf-8')
def test_blob_serialization_when_payload_is_bytes(self):
body = b'bytes body'
request = self.serialize_to_request(input_params={'Blob': body})
- self.assertEqual(request['body'], body)
+ assert request['body'] == body
class TestTimestampHeadersWithRestXML(unittest.TestCase):
@@ -187,32 +186,27 @@ class TestTimestampHeadersWithRestXML(unittest.TestCase):
request = self.serialize_to_request(
{'TimestampHeader': datetime.datetime(2014, 1, 1, 12, 12, 12,
tzinfo=dateutil.tz.tzutc())})
- self.assertEqual(request['headers']['x-timestamp'],
- 'Wed, 01 Jan 2014 12:12:12 GMT')
+ assert request['headers']['x-timestamp'] == 'Wed, 01 Jan 2014 12:12:12 GMT'
def test_accepts_iso_8601_format(self):
request = self.serialize_to_request(
{'TimestampHeader': '2014-01-01T12:12:12+00:00'})
- self.assertEqual(request['headers']['x-timestamp'],
- 'Wed, 01 Jan 2014 12:12:12 GMT')
+ assert request['headers']['x-timestamp'] == 'Wed, 01 Jan 2014 12:12:12 GMT'
def test_accepts_iso_8601_format_non_utc(self):
request = self.serialize_to_request(
{'TimestampHeader': '2014-01-01T07:12:12-05:00'})
- self.assertEqual(request['headers']['x-timestamp'],
- 'Wed, 01 Jan 2014 12:12:12 GMT')
+ assert request['headers']['x-timestamp'] == 'Wed, 01 Jan 2014 12:12:12 GMT'
def test_accepts_rfc_822_format(self):
request = self.serialize_to_request(
{'TimestampHeader': 'Wed, 01 Jan 2014 12:12:12 GMT'})
- self.assertEqual(request['headers']['x-timestamp'],
- 'Wed, 01 Jan 2014 12:12:12 GMT')
+ assert request['headers']['x-timestamp'] == 'Wed, 01 Jan 2014 12:12:12 GMT'
def test_accepts_unix_timestamp_integer(self):
request = self.serialize_to_request(
{'TimestampHeader': 1388578332})
- self.assertEqual(request['headers']['x-timestamp'],
- 'Wed, 01 Jan 2014 12:12:12 GMT')
+ assert request['headers']['x-timestamp'] == 'Wed, 01 Jan 2014 12:12:12 GMT'
class TestTimestamps(unittest.TestCase):
@@ -255,17 +249,17 @@ class TestTimestamps(unittest.TestCase):
request = self.serialize_to_request(
{'Timestamp': datetime.datetime(2014, 1, 1, 12, 12, 12,
tzinfo=dateutil.tz.tzutc())})
- self.assertEqual(request['body']['Timestamp'], '2014-01-01T12:12:12Z')
+ assert request['body']['Timestamp'] == '2014-01-01T12:12:12Z'
def test_accepts_naive_datetime_object(self):
request = self.serialize_to_request(
{'Timestamp': datetime.datetime(2014, 1, 1, 12, 12, 12)})
- self.assertEqual(request['body']['Timestamp'], '2014-01-01T12:12:12Z')
+ assert request['body']['Timestamp'] == '2014-01-01T12:12:12Z'
def test_accepts_iso_8601_format(self):
request = self.serialize_to_request(
{'Timestamp': '2014-01-01T12:12:12Z'})
- self.assertEqual(request['body']['Timestamp'], '2014-01-01T12:12:12Z')
+ assert request['body']['Timestamp'] == '2014-01-01T12:12:12Z'
def test_accepts_timestamp_without_tz_info(self):
# If a timezone/utc is not specified, assume they meant
@@ -273,13 +267,12 @@ class TestTimestamps(unittest.TestCase):
# of botocore so we want to make sure we preserve this behavior.
request = self.serialize_to_request(
{'Timestamp': '2014-01-01T12:12:12'})
- self.assertEqual(request['body']['Timestamp'], '2014-01-01T12:12:12Z')
+ assert request['body']['Timestamp'] == '2014-01-01T12:12:12Z'
def test_microsecond_timestamp_without_tz_info(self):
request = self.serialize_to_request(
{'Timestamp': '2014-01-01T12:12:12.123456'})
- self.assertEqual(request['body']['Timestamp'],
- '2014-01-01T12:12:12.123456Z')
+ assert request['body']['Timestamp'] == '2014-01-01T12:12:12.123456Z'
class TestJSONTimestampSerialization(unittest.TestCase):
@@ -322,21 +315,21 @@ class TestJSONTimestampSerialization(unittest.TestCase):
def test_accepts_iso_8601_format(self):
body = json.loads(self.serialize_to_request(
{'Timestamp': '1970-01-01T00:00:00'})['body'].decode('utf-8'))
- self.assertEqual(body['Timestamp'], 0)
+ assert body['Timestamp'] == 0
def test_accepts_epoch(self):
body = json.loads(self.serialize_to_request(
{'Timestamp': '0'})['body'].decode('utf-8'))
- self.assertEqual(body['Timestamp'], 0)
+ assert body['Timestamp'] == 0
# Can also be an integer 0.
body = json.loads(self.serialize_to_request(
{'Timestamp': 0})['body'].decode('utf-8'))
- self.assertEqual(body['Timestamp'], 0)
+ assert body['Timestamp'] == 0
def test_accepts_partial_iso_format(self):
body = json.loads(self.serialize_to_request(
{'Timestamp': '1970-01-01'})['body'].decode('utf-8'))
- self.assertEqual(body['Timestamp'], 0)
+ assert body['Timestamp'] == 0
class TestInstanceCreation(unittest.TestCase):
@@ -376,7 +369,7 @@ class TestInstanceCreation(unittest.TestCase):
{'Timestamp': valid_string},
self.service_model.operation_model('TestOperation'))
- self.assertEqual(request['body']['Timestamp'], valid_string)
+ assert request['body']['Timestamp'] == valid_string
def assert_serialize_invalid_parameter(self, request_serializer):
invalid_string = 'short string'
@@ -384,7 +377,7 @@ class TestInstanceCreation(unittest.TestCase):
{'Timestamp': invalid_string},
self.service_model.operation_model('TestOperation'))
- self.assertEqual(request['body']['Timestamp'], invalid_string)
+ assert request['body']['Timestamp'] == invalid_string
def test_instantiate_without_validation(self):
request_serializer = serialize.create_serializer(
@@ -411,7 +404,7 @@ class TestInstanceCreation(unittest.TestCase):
self.fail(
"Shouldn't fail serializing valid parameter with validation".format(e))
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.assert_serialize_invalid_parameter(request_serializer)
@@ -451,7 +444,7 @@ class TestHeaderSerialization(BaseModelWithBlob):
def test_always_serialized_as_str(self):
request = self.serialize_to_request({'ContentLength': 100})
- self.assertEqual(request['headers']['Content-Length'], '100')
+ assert request['headers']['Content-Length'] == '100'
class TestRestXMLUnicodeSerialization(unittest.TestCase):
diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py
index b299b2cd..249b7831 100644
--- a/tests/unit/test_session.py
+++ b/tests/unit/test_session.py
@@ -18,6 +18,7 @@ import os
import logging
import tempfile
import shutil
+import pytest
from tests import mock
@@ -126,17 +127,17 @@ class SessionTest(BaseSessionTest):
)
self.environ['BAR_DEFAULT_PROFILE'] = 'first'
self.environ['BAR_PROFILE'] = 'second'
- self.assertEqual(self.session.get_config_variable('profile'), 'first')
+ assert self.session.get_config_variable('profile') == 'first'
def test_profile_when_set_explicitly(self):
session = create_session(profile='asdf')
- self.assertEqual(session.profile, 'asdf')
+ assert session.profile == 'asdf'
def test_profile_when_pulled_from_env(self):
self.environ['FOO_PROFILE'] = 'bar'
# Even though we didn't explicitly pass in a profile, the
# profile property will still look this up for us.
- self.assertEqual(self.session.profile, 'bar')
+ assert self.session.profile == 'bar'
def test_multiple_env_vars_uses_second_var(self):
self.update_session_config_mapping(
@@ -144,27 +145,27 @@ class SessionTest(BaseSessionTest):
)
self.environ.pop('BAR_DEFAULT_PROFILE', None)
self.environ['BAR_PROFILE'] = 'second'
- self.assertEqual(self.session.get_config_variable('profile'), 'second')
+ assert self.session.get_config_variable('profile') == 'second'
def test_profile_does_not_exist_raises_exception(self):
# Given we have no profile:
self.environ['FOO_PROFILE'] = 'profile_that_does_not_exist'
- with self.assertRaises(botocore.exceptions.ProfileNotFound):
+ with pytest.raises(botocore.exceptions.ProfileNotFound):
self.session.get_scoped_config()
def test_variable_does_not_exist(self):
- self.assertIsNone(self.session.get_config_variable('foo/bar'))
+ assert self.session.get_config_variable('foo/bar') is None
def test_get_aws_services_in_alphabetical_order(self):
services = self.session.get_available_services()
- self.assertEqual(sorted(services), services)
+ assert sorted(services) == services
def test_profile_does_not_exist_with_default_profile(self):
config = self.session.get_scoped_config()
# We should have loaded this properly, and we'll check
# that foo_access_key which is defined in the config
# file should be present in the loaded config dict.
- self.assertIn('aws_access_key_id', config)
+ assert 'aws_access_key_id' in config
def test_type_conversions_occur_when_specified(self):
# Specify that we can retrieve the var from the
@@ -178,8 +179,8 @@ class SessionTest(BaseSessionTest):
# Environment variables are always strings.
self.environ['FOO_TIMEOUT'] = '10'
# But we should type convert this to a string.
- self.assertEqual(
- self.session.get_config_variable('metadata_service_timeout'), 10)
+ assert self.session.get_config_variable(
+ 'metadata_service_timeout') == 10
def test_default_profile_specified_raises_exception(self):
# If you explicity set the default profile and you don't
@@ -191,7 +192,7 @@ class SessionTest(BaseSessionTest):
# In this case, even though we specified default, because
# the boto_config_empty config file does not have a default
# profile, we should be raising an exception.
- with self.assertRaises(botocore.exceptions.ProfileNotFound):
+ with pytest.raises(botocore.exceptions.ProfileNotFound):
self.session.get_scoped_config()
def test_file_logger(self):
@@ -200,15 +201,15 @@ class SessionTest(BaseSessionTest):
self.session.set_file_logger(logging.DEBUG, temp_file)
self.addCleanup(self.close_log_file_handler, tempdir, temp_file)
self.session.get_credentials()
- self.assertTrue(os.path.isfile(temp_file))
+ assert os.path.isfile(temp_file)
with open(temp_file) as logfile:
s = logfile.read()
- self.assertTrue('Looking for credentials' in s)
+ assert 'Looking for credentials' in s
def test_full_config_property(self):
full_config = self.session.full_config
- self.assertTrue('foo' in full_config['profiles'])
- self.assertTrue('default' in full_config['profiles'])
+ assert 'foo' in full_config['profiles']
+ assert 'default' in full_config['profiles']
def test_full_config_merges_creds_file_data(self):
with temporary_file('w') as f:
@@ -219,9 +220,9 @@ class SessionTest(BaseSessionTest):
f.flush()
full_config = self.session.full_config
- self.assertEqual(full_config['profiles']['newprofile'],
- {'aws_access_key_id': 'FROM_CREDS_FILE_1',
- 'aws_secret_access_key': 'FROM_CREDS_FILE_2'})
+ assert full_config['profiles']['newprofile'] == {
+ 'aws_access_key_id': 'FROM_CREDS_FILE_1',
+ 'aws_secret_access_key': 'FROM_CREDS_FILE_2'}
def test_path_not_in_available_profiles(self):
with temporary_file('w') as f:
@@ -232,17 +233,16 @@ class SessionTest(BaseSessionTest):
f.flush()
profiles = self.session.available_profiles
- self.assertEqual(
- set(profiles),
- set(['foo', 'default', 'newprofile']))
+ assert set(profiles) == set([
+ 'foo', 'default', 'newprofile'])
def test_emit_delegates_to_emitter(self):
calls = []
handler = lambda **kwargs: calls.append(kwargs)
self.session.register('foo', handler)
self.session.emit('foo')
- self.assertEqual(len(calls), 1)
- self.assertEqual(calls[0]['event_name'], 'foo')
+ assert len(calls) == 1
+ assert calls[0]['event_name'] == 'foo'
def test_emitter_can_be_passed_in(self):
events = HierarchicalEmitter()
@@ -252,14 +252,14 @@ class SessionTest(BaseSessionTest):
events.register('foo', handler)
session.emit('foo')
- self.assertEqual(len(calls), 1)
+ assert len(calls) == 1
def test_emit_first_non_none(self):
self.session.register('foo', lambda **kwargs: None)
self.session.register('foo', lambda **kwargs: 'first')
self.session.register('foo', lambda **kwargs: 'second')
response = self.session.emit_first_non_none_response('foo')
- self.assertEqual(response, 'first')
+ assert response == 'first'
@mock.patch('logging.getLogger')
@mock.patch('logging.FileHandler')
@@ -285,11 +285,11 @@ class SessionTest(BaseSessionTest):
handler = lambda **kwargs: calls.append(kwargs)
self.session.register('foo', handler, unique_id='bar')
self.session.emit('foo')
- self.assertEqual(calls[0]['event_name'], 'foo')
+ assert calls[0]['event_name'] == 'foo'
calls = []
self.session.unregister('foo', unique_id='bar')
self.session.emit('foo')
- self.assertEqual(calls, [])
+ assert calls == []
class TestBuiltinEventHandlers(BaseSessionTest):
@@ -313,7 +313,7 @@ class TestBuiltinEventHandlers(BaseSessionTest):
def test_registered_builtin_handlers(self):
session = create_session(include_builtin_handlers=True)
session.emit('foo')
- self.assertTrue(self.foo_called)
+ assert self.foo_called
class TestSessionConfigurationVars(BaseSessionTest):
@@ -325,20 +325,19 @@ class TestSessionConfigurationVars(BaseSessionTest):
default='default',
)
# Default value.
- self.assertEqual(self.session.get_config_variable('foobar'), 'default')
+ assert self.session.get_config_variable('foobar') == 'default'
# Retrieve from os environment variable.
self.environ['FOOBAR'] = 'fromenv'
- self.assertEqual(self.session.get_config_variable('foobar'), 'fromenv')
+ assert self.session.get_config_variable('foobar') == 'fromenv'
# Explicit override.
self.session.set_config_variable('foobar', 'session-instance')
- self.assertEqual(self.session.get_config_variable('foobar'),
- 'session-instance')
+ assert self.session.get_config_variable('foobar') == 'session-instance'
# Back to default value.
del self.environ['FOOBAR']
self.session.set_config_variable('foobar', None)
- self.assertEqual(self.session.get_config_variable('foobar'), 'default')
+ assert self.session.get_config_variable('foobar') == 'default'
def test_default_value_can_be_overriden(self):
self.update_session_config_mapping(
@@ -347,17 +346,17 @@ class TestSessionConfigurationVars(BaseSessionTest):
env_var_names='FOOBAR',
default='default',
)
- self.assertEqual(self.session.get_config_variable('foobar'), 'default')
+ assert self.session.get_config_variable('foobar') == 'default'
def test_can_get_with_methods(self):
self.environ['AWS_DEFAULT_REGION'] = 'env-var'
self.session.set_config_variable('region', 'instance-var')
value = self.session.get_config_variable('region')
- self.assertEqual(value, 'instance-var')
+ assert value == 'instance-var'
value = self.session.get_config_variable(
'region', methods=('env',))
- self.assertEqual(value, 'env-var')
+ assert value == 'env-var'
class TestSessionPartitionFiles(BaseSessionTest):
@@ -366,7 +365,7 @@ class TestSessionPartitionFiles(BaseSessionTest):
mock_resolver.get_available_partitions.return_value = ['foo']
self.session._register_internal_component(
'endpoint_resolver', mock_resolver)
- self.assertEqual(['foo'], self.session.get_available_partitions())
+ assert ['foo'] == self.session.get_available_partitions()
def test_proxies_list_endpoints_to_resolver(self):
resolver = mock.Mock()
@@ -377,36 +376,35 @@ class TestSessionPartitionFiles(BaseSessionTest):
def test_provides_empty_list_for_unknown_service_regions(self):
regions = self.session.get_available_regions('__foo__')
- self.assertEqual([], regions)
+ assert regions == []
class TestSessionUserAgent(BaseSessionTest):
def test_can_change_user_agent_name(self):
self.session.user_agent_name = 'something-else'
- self.assertTrue(self.session.user_agent().startswith('something-else'))
+ assert self.session.user_agent().startswith('something-else')
def test_can_change_user_agent_version(self):
self.session.user_agent_version = '24.0'
- self.assertTrue(self.session.user_agent().startswith('Botocore/24.0'))
+ assert self.session.user_agent().startswith('Botocore/24.0')
def test_can_append_to_user_agent(self):
self.session.user_agent_extra = 'custom-thing/other'
- self.assertTrue(
- self.session.user_agent().endswith('custom-thing/other'))
+ assert self.session.user_agent().endswith('custom-thing/other')
def test_execution_env_not_set(self):
- self.assertFalse(self.session.user_agent().endswith('FooEnv'))
+ assert not self.session.user_agent().endswith('FooEnv')
def test_execution_env_set(self):
self.environ['AWS_EXECUTION_ENV'] = 'FooEnv'
- self.assertTrue(self.session.user_agent().endswith(' exec-env/FooEnv'))
+ assert self.session.user_agent().endswith(' exec-env/FooEnv')
def test_agent_extra_and_exec_env(self):
self.session.user_agent_extra = 'custom-thing/other'
self.environ['AWS_EXECUTION_ENV'] = 'FooEnv'
user_agent = self.session.user_agent()
- self.assertTrue(user_agent.endswith('custom-thing/other'))
- self.assertIn('exec-env/FooEnv', user_agent)
+ assert user_agent.endswith('custom-thing/other')
+ assert 'exec-env/FooEnv' in user_agent
class TestConfigLoaderObject(BaseSessionTest):
@@ -419,9 +417,9 @@ class TestConfigLoaderObject(BaseSessionTest):
session.set_config_variable('credentials_file', f.name)
# Now trying to retrieve the scoped config should pull in
# values from the shared credentials file.
- self.assertEqual(session.get_scoped_config(),
- {'aws_access_key_id': 'a',
- 'aws_secret_access_key': 'b'})
+ assert session.get_scoped_config() == {
+ 'aws_access_key_id': 'a',
+ 'aws_secret_access_key': 'b'}
class TestGetServiceModel(BaseSessionTest):
@@ -432,8 +430,8 @@ class TestGetServiceModel(BaseSessionTest):
}
self.session.register_component('data_loader', loader)
model = self.session.get_service_model('made_up')
- self.assertIsInstance(model, ServiceModel)
- self.assertEqual(model.service_name, 'made_up')
+ assert isinstance(model, ServiceModel)
+ assert model.service_name == 'made_up'
class TestGetPaginatorModel(BaseSessionTest):
@@ -445,7 +443,7 @@ class TestGetPaginatorModel(BaseSessionTest):
model = self.session.get_paginator_model('foo')
# Verify we get a PaginatorModel back
- self.assertIsInstance(model, PaginatorModel)
+ assert isinstance(model, PaginatorModel)
# Verify we called the loader correctly.
loader.load_service_model.assert_called_with(
'foo', 'paginators-1', None)
@@ -460,8 +458,8 @@ class TestGetWaiterModel(BaseSessionTest):
model = self.session.get_waiter_model('foo')
# Verify we (1) get the expected return data,
- self.assertIsInstance(model, WaiterModel)
- self.assertEqual(model.waiter_names, [])
+ assert isinstance(model, WaiterModel)
+ assert model.waiter_names == []
# and (2) call the loader correctly.
loader.load_service_model.assert_called_with(
'foo', 'waiters-2', None)
@@ -470,7 +468,7 @@ class TestGetWaiterModel(BaseSessionTest):
class TestCreateClient(BaseSessionTest):
def test_can_create_client(self):
sts_client = self.session.create_client('sts', 'us-west-2')
- self.assertIsInstance(sts_client, client.BaseClient)
+ assert isinstance(sts_client, client.BaseClient)
def test_credential_provider_not_called_when_creds_provided(self):
cred_provider = mock.Mock()
@@ -481,19 +479,19 @@ class TestCreateClient(BaseSessionTest):
aws_access_key_id='foo',
aws_secret_access_key='bar',
aws_session_token='baz')
- self.assertFalse(cred_provider.load_credentials.called,
- "Credential provider was called even though "
- "explicit credentials were provided to the "
- "create_client call.")
+ message = ("Credential provider was called even though "
+ "explicit credentials were provided to the "
+ "create_client call.")
+ assert not cred_provider.load_credentials.called, message
def test_cred_provider_called_when_partial_creds_provided(self):
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
self.session.create_client(
'sts', 'us-west-2',
aws_access_key_id='foo',
aws_secret_access_key=None
)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
self.session.create_client(
'sts', 'us-west-2',
aws_access_key_id=None,
@@ -506,12 +504,12 @@ class TestCreateClient(BaseSessionTest):
'credential_provider', cred_provider)
config = botocore.config.Config(signature_version=UNSIGNED)
self.session.create_client('sts', 'us-west-2', config=config)
- self.assertFalse(cred_provider.load_credentials.called)
+ assert not cred_provider.load_credentials.called
@mock.patch('botocore.client.ClientCreator')
def test_config_passed_to_client_creator(self, client_creator):
# Make sure there is no default set
- self.assertEqual(self.session.get_default_client_config(), None)
+ assert self.session.get_default_client_config() is None
# The config passed to the client should be the one that is used
# in creating the client.
@@ -547,37 +545,37 @@ class TestCreateClient(BaseSessionTest):
client_creator.return_value.create_client.call_args[1][
'client_config'])
# Check that the client configs were merged
- self.assertEqual(used_client_config.region_name, 'us-east-1')
+ assert used_client_config.region_name == 'us-east-1'
# Make sure that the client config used is not the default client
# config or the one passed in. It should be a new config.
- self.assertIsNot(used_client_config, config)
- self.assertIsNot(used_client_config, other_config)
+ assert used_client_config is not config
+ assert used_client_config is not other_config
def test_create_client_with_region(self):
ec2_client = self.session.create_client(
'ec2', 'us-west-2')
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
def test_create_client_with_region_and_client_config(self):
config = botocore.config.Config()
# Use a client config with no region configured.
ec2_client = self.session.create_client(
'ec2', region_name='us-west-2', config=config)
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
# If the region name is changed, it should not change the
# region of the client
config.region_name = 'us-east-1'
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
# Now make a new client with the updated client config.
ec2_client = self.session.create_client(
'ec2', config=config)
- self.assertEqual(ec2_client.meta.region_name, 'us-east-1')
+ assert ec2_client.meta.region_name == 'us-east-1'
def test_create_client_no_region_and_no_client_config(self):
ec2_client = self.session.create_client('ec2')
- self.assertEqual(ec2_client.meta.region_name, 'us-west-11')
+ assert ec2_client.meta.region_name == 'us-west-11'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_ca_bundle_from_config(self, client_creator):
@@ -591,21 +589,21 @@ class TestCreateClient(BaseSessionTest):
self.session.create_client('ec2', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'config-certs.pem')
+ assert call_kwargs['verify'] == 'config-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_ca_bundle_from_env_var(self, client_creator):
self.environ['FOO_AWS_CA_BUNDLE'] = 'env-certs.pem'
self.session.create_client('ec2', 'us-west-2')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'env-certs.pem')
+ assert call_kwargs['verify'] == 'env-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_verify_param(self, client_creator):
self.session.create_client(
'ec2', 'us-west-2', verify='verify-certs.pem')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'verify-certs.pem')
+ assert call_kwargs['verify'] == 'verify-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_verify_param_overrides_all(self, client_creator):
@@ -627,13 +625,13 @@ class TestCreateClient(BaseSessionTest):
create_client.call_args[1]
# The verify parameter should override all the other
# configurations
- self.assertEqual(call_kwargs['verify'], 'verify-certs.pem')
+ assert call_kwargs['verify'] == 'verify-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_use_no_api_version_by_default(self, client_creator):
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], None)
+ assert call_kwargs['api_version'] is None
@mock.patch('botocore.client.ClientCreator')
def test_create_client_uses_api_version_from_config(self, client_creator):
@@ -649,7 +647,7 @@ class TestCreateClient(BaseSessionTest):
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], config_api_version)
+ assert call_kwargs['api_version'] == config_api_version
@mock.patch('botocore.client.ClientCreator')
def test_can_specify_multiple_versions_from_config(self, client_creator):
@@ -663,19 +661,18 @@ class TestCreateClient(BaseSessionTest):
' myservice = %s\n'
' myservice2 = %s\n' % (
config_api_version, second_config_api_version)
- )
+ )
f.flush()
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], config_api_version)
+ assert call_kwargs['api_version'] == config_api_version
self.session.create_client('myservice2', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(
- call_kwargs['api_version'], second_config_api_version)
+ assert call_kwargs['api_version'] == second_config_api_version
@mock.patch('botocore.client.ClientCreator')
def test_param_api_version_overrides_config_value(self, client_creator):
@@ -693,31 +690,28 @@ class TestCreateClient(BaseSessionTest):
'myservice', 'us-west-2', api_version=override_api_version)
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], override_api_version)
+ assert call_kwargs['api_version'] == override_api_version
class TestSessionComponent(BaseSessionTest):
def test_internal_component(self):
component = object()
self.session._register_internal_component('internal', component)
- self.assertIs(
- self.session._get_internal_component('internal'), component)
- with self.assertRaises(ValueError):
+ assert self.session._get_internal_component('internal') is component
+ with pytest.raises(ValueError):
self.session.get_component('internal')
def test_internal_endpoint_resolver_is_same_as_deprecated_public(self):
endpoint_resolver = self.session._get_internal_component(
'endpoint_resolver')
- self.assertIs(
- self.session.get_component('endpoint_resolver'), endpoint_resolver)
+ assert self.session.get_component(
+ 'endpoint_resolver') is endpoint_resolver
def test_internal_exceptions_factory_is_same_as_deprecated_public(self):
exceptions_factory = self.session._get_internal_component(
'exceptions_factory')
- self.assertIs(
- self.session.get_component('exceptions_factory'),
- exceptions_factory
- )
+ assert self.session.get_component(
+ 'exceptions_factory') is exceptions_factory
class TestClientMonitoring(BaseSessionTest):
@@ -732,10 +726,10 @@ class TestClientMonitoring(BaseSessionTest):
with mock.patch('botocore.monitoring.SocketPublisher',
spec=True) as mock_publisher:
client = session.create_client('ec2', 'us-west-2')
- self.assertEqual(mock_publisher.call_count, 1)
+ assert mock_publisher.call_count == 1
_, args, kwargs = mock_publisher.mock_calls[0]
- self.assertEqual(kwargs.get('host'), host)
- self.assertEqual(kwargs.get('port'), port)
+ assert kwargs.get('host') == host
+ assert kwargs.get('port') == port
def assert_created_client_is_not_monitored(self, session):
with mock.patch('botocore.session.monitoring.Monitor',
@@ -794,42 +788,42 @@ class TestComponentLocator(unittest.TestCase):
self.components = botocore.session.ComponentLocator()
def test_unknown_component_raises_exception(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.components.get_component('unknown-component')
def test_can_register_and_retrieve_component(self):
component = object()
self.components.register_component('foo', component)
- self.assertIs(self.components.get_component('foo'), component)
+ assert self.components.get_component('foo') is component
def test_last_registration_wins(self):
first = object()
second = object()
self.components.register_component('foo', first)
self.components.register_component('foo', second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_can_lazy_register_a_component(self):
component = object()
- lazy = lambda: component
+ def lazy(): return component
self.components.lazy_register_component('foo', lazy)
- self.assertIs(self.components.get_component('foo'), component)
+ assert self.components.get_component('foo') is component
def test_latest_registration_wins_even_if_lazy(self):
first = object()
second = object()
- lazy_second = lambda: second
+ def lazy_second(): return second
self.components.register_component('foo', first)
self.components.lazy_register_component('foo', lazy_second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_latest_registration_overrides_lazy(self):
first = object()
second = object()
- lazy_first = lambda: first
+ def lazy_first(): return first
self.components.lazy_register_component('foo', lazy_first)
self.components.register_component('foo', second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_lazy_registration_factory_does_not_remove_from_list_on_error(self):
class ArbitraryError(Exception):
@@ -840,41 +834,41 @@ class TestComponentLocator(unittest.TestCase):
self.components.lazy_register_component('foo', bad_factory)
- with self.assertRaises(ArbitraryError):
+ with pytest.raises(ArbitraryError):
self.components.get_component('foo')
# Trying again should raise the same exception,
# not an ValueError("Unknown component")
- with self.assertRaises(ArbitraryError):
+ with pytest.raises(ArbitraryError):
self.components.get_component('foo')
class TestDefaultClientConfig(BaseSessionTest):
def test_new_session_has_no_default_client_config(self):
- self.assertEqual(self.session.get_default_client_config(), None)
+ assert self.session.get_default_client_config() is None
def test_set_and_get_client_config(self):
client_config = botocore.config.Config()
self.session.set_default_client_config(client_config)
- self.assertIs(self.session.get_default_client_config(), client_config)
+ assert self.session.get_default_client_config() is client_config
class TestSessionRegionSetup(BaseSessionTest):
def test_new_session_with_valid_region(self):
s3_client = self.session.create_client('s3', 'us-west-2')
- self.assertIsInstance(s3_client, client.BaseClient)
- self.assertEquals(s3_client.meta.region_name, 'us-west-2')
+ assert isinstance(s3_client, client.BaseClient)
+ assert s3_client.meta.region_name == 'us-west-2'
def test_new_session_with_unknown_region(self):
s3_client = self.session.create_client('s3', 'MyCustomRegion1')
- self.assertIsInstance(s3_client, client.BaseClient)
- self.assertEquals(s3_client.meta.region_name, 'MyCustomRegion1')
+ assert isinstance(s3_client, client.BaseClient)
+ assert s3_client.meta.region_name == 'MyCustomRegion1'
def test_new_session_with_invalid_region(self):
- with self.assertRaises(botocore.exceptions.InvalidRegionError):
+ with pytest.raises(botocore.exceptions.InvalidRegionError):
s3_client = self.session.create_client('s3', 'not.a.real#region')
def test_new_session_with_none_region(self):
s3_client = self.session.create_client('s3', region_name=None)
- self.assertIsInstance(s3_client, client.BaseClient)
- self.assertTrue(s3_client.meta.region_name is not None)
+ assert isinstance(s3_client, client.BaseClient)
+ assert s3_client.meta.region_name is not None
diff --git a/tests/unit/test_session_legacy.py b/tests/unit/test_session_legacy.py
index 4016d1b4..dbf82f51 100644
--- a/tests/unit/test_session_legacy.py
+++ b/tests/unit/test_session_legacy.py
@@ -18,6 +18,7 @@ import os
import logging
import tempfile
import shutil
+import pytest
from tests import mock
@@ -88,17 +89,17 @@ class SessionTest(BaseSessionTest):
session = create_session(session_vars=env_vars)
self.environ['BAR_DEFAULT_PROFILE'] = 'first'
self.environ['BAR_PROFILE'] = 'second'
- self.assertEqual(session.get_config_variable('profile'), 'first')
+ assert session.get_config_variable('profile') == 'first'
def test_profile_when_set_explicitly(self):
session = create_session(session_vars=self.env_vars, profile='asdf')
- self.assertEqual(session.profile, 'asdf')
+ assert session.profile == 'asdf'
def test_profile_when_pulled_from_env(self):
self.environ['FOO_PROFILE'] = 'bar'
# Even though we didn't explicitly pass in a profile, the
# profile property will still look this up for us.
- self.assertEqual(self.session.profile, 'bar')
+ assert self.session.profile == 'bar'
def test_multiple_env_vars_uses_second_var(self):
env_vars = {
@@ -108,20 +109,19 @@ class SessionTest(BaseSessionTest):
session = create_session(session_vars=env_vars)
self.environ.pop('BAR_DEFAULT_PROFILE', None)
self.environ['BAR_PROFILE'] = 'second'
- self.assertEqual(session.get_config_variable('profile'), 'second')
+ assert session.get_config_variable('profile') == 'second'
def test_profile(self):
- self.assertEqual(self.session.get_config_variable('profile'), 'foo')
- self.assertEqual(self.session.get_config_variable('region'),
- 'us-west-11')
+ assert self.session.get_config_variable('profile') == 'foo'
+ assert self.session.get_config_variable('region') == 'us-west-11'
self.session.get_config_variable('profile') == 'default'
saved_region = self.environ['FOO_REGION']
del self.environ['FOO_REGION']
saved_profile = self.environ['FOO_PROFILE']
del self.environ['FOO_PROFILE']
session = create_session(session_vars=self.env_vars)
- self.assertEqual(session.get_config_variable('profile'), None)
- self.assertEqual(session.get_config_variable('region'), 'us-west-1')
+ assert session.get_config_variable('profile') is None
+ assert session.get_config_variable('region') == 'us-west-1'
self.environ['FOO_REGION'] = saved_region
self.environ['FOO_PROFILE'] = saved_profile
@@ -129,17 +129,17 @@ class SessionTest(BaseSessionTest):
# Given we have no profile:
self.environ['FOO_PROFILE'] = 'profile_that_does_not_exist'
session = create_session(session_vars=self.env_vars)
- with self.assertRaises(botocore.exceptions.ProfileNotFound):
+ with pytest.raises(botocore.exceptions.ProfileNotFound):
session.get_scoped_config()
def test_variable_does_not_exist(self):
session = create_session(session_vars=self.env_vars)
- self.assertIsNone(session.get_config_variable('foo/bar'))
+ assert session.get_config_variable('foo/bar') is None
def test_get_aws_services_in_alphabetical_order(self):
session = create_session(session_vars=self.env_vars)
services = session.get_available_services()
- self.assertEqual(sorted(services), services)
+ assert sorted(services) == services
def test_profile_does_not_exist_with_default_profile(self):
session = create_session(session_vars=self.env_vars)
@@ -147,7 +147,7 @@ class SessionTest(BaseSessionTest):
# We should have loaded this properly, and we'll check
# that foo_access_key which is defined in the config
# file should be present in the loaded config dict.
- self.assertIn('aws_access_key_id', config)
+ assert 'aws_access_key_id' in config
def test_type_conversions_occur_when_specified(self):
# Specify that we can retrieve the var from the
@@ -159,8 +159,7 @@ class SessionTest(BaseSessionTest):
self.environ['FOO_TIMEOUT'] = '10'
session = create_session(session_vars=self.env_vars)
# But we should type convert this to a string.
- self.assertEqual(
- session.get_config_variable('metadata_service_timeout'), 10)
+ assert session.get_config_variable('metadata_service_timeout') == 10
def test_default_profile_specified_raises_exception(self):
# If you explicity set the default profile and you don't
@@ -173,7 +172,7 @@ class SessionTest(BaseSessionTest):
# In this case, even though we specified default, because
# the boto_config_empty config file does not have a default
# profile, we should be raising an exception.
- with self.assertRaises(botocore.exceptions.ProfileNotFound):
+ with pytest.raises(botocore.exceptions.ProfileNotFound):
session.get_scoped_config()
def test_file_logger(self):
@@ -182,15 +181,15 @@ class SessionTest(BaseSessionTest):
self.session.set_file_logger(logging.DEBUG, temp_file)
self.addCleanup(self.close_log_file_handler, tempdir, temp_file)
self.session.get_credentials()
- self.assertTrue(os.path.isfile(temp_file))
+ assert os.path.isfile(temp_file)
with open(temp_file) as logfile:
s = logfile.read()
- self.assertTrue('Looking for credentials' in s)
+ assert 'Looking for credentials' in s
def test_full_config_property(self):
full_config = self.session.full_config
- self.assertTrue('foo' in full_config['profiles'])
- self.assertTrue('default' in full_config['profiles'])
+ assert 'foo' in full_config['profiles']
+ assert 'default' in full_config['profiles']
def test_full_config_merges_creds_file_data(self):
with temporary_file('w') as f:
@@ -201,9 +200,9 @@ class SessionTest(BaseSessionTest):
f.flush()
full_config = self.session.full_config
- self.assertEqual(full_config['profiles']['newprofile'],
- {'aws_access_key_id': 'FROM_CREDS_FILE_1',
- 'aws_secret_access_key': 'FROM_CREDS_FILE_2'})
+ assert full_config['profiles']['newprofile'] == {
+ 'aws_access_key_id': 'FROM_CREDS_FILE_1',
+ 'aws_secret_access_key': 'FROM_CREDS_FILE_2'}
def test_path_not_in_available_profiles(self):
with temporary_file('w') as f:
@@ -214,17 +213,16 @@ class SessionTest(BaseSessionTest):
f.flush()
profiles = self.session.available_profiles
- self.assertEqual(
- set(profiles),
- set(['foo', 'default', 'newprofile']))
+ assert set(profiles) == set([
+ 'foo', 'default', 'newprofile'])
def test_emit_delegates_to_emitter(self):
calls = []
handler = lambda **kwargs: calls.append(kwargs)
self.session.register('foo', handler)
self.session.emit('foo')
- self.assertEqual(len(calls), 1)
- self.assertEqual(calls[0]['event_name'], 'foo')
+ assert len(calls) == 1
+ assert calls[0]['event_name'] == 'foo'
def test_emitter_can_be_passed_in(self):
events = HierarchicalEmitter()
@@ -235,7 +233,7 @@ class SessionTest(BaseSessionTest):
events.register('foo', handler)
session.emit('foo')
- self.assertEqual(len(calls), 1)
+ assert len(calls) == 1
def test_emit_first_non_none(self):
session = create_session(session_vars=self.env_vars)
@@ -243,7 +241,7 @@ class SessionTest(BaseSessionTest):
session.register('foo', lambda **kwargs: 'first')
session.register('foo', lambda **kwargs: 'second')
response = session.emit_first_non_none_response('foo')
- self.assertEqual(response, 'first')
+ assert response == 'first'
@mock.patch('logging.getLogger')
@mock.patch('logging.FileHandler')
@@ -269,11 +267,11 @@ class SessionTest(BaseSessionTest):
handler = lambda **kwargs: calls.append(kwargs)
self.session.register('foo', handler, unique_id='bar')
self.session.emit('foo')
- self.assertEqual(calls[0]['event_name'], 'foo')
+ assert calls[0]['event_name'] == 'foo'
calls = []
self.session.unregister('foo', unique_id='bar')
self.session.emit('foo')
- self.assertEqual(calls, [])
+ assert calls == []
class TestBuiltinEventHandlers(BaseSessionTest):
@@ -298,7 +296,7 @@ class TestBuiltinEventHandlers(BaseSessionTest):
session = botocore.session.Session(self.env_vars, None,
include_builtin_handlers=True)
session.emit('foo')
- self.assertTrue(self.foo_called)
+ assert self.foo_called
class TestSessionConfigurationVars(BaseSessionTest):
@@ -306,61 +304,51 @@ class TestSessionConfigurationVars(BaseSessionTest):
self.session.session_var_map['foobar'] = (None, 'FOOBAR',
'default', None)
# Default value.
- self.assertEqual(self.session.get_config_variable('foobar'), 'default')
+ assert self.session.get_config_variable('foobar') == 'default'
# Retrieve from os environment variable.
self.environ['FOOBAR'] = 'fromenv'
- self.assertEqual(self.session.get_config_variable('foobar'), 'fromenv')
+ assert self.session.get_config_variable('foobar') == 'fromenv'
# Explicit override.
self.session.set_config_variable('foobar', 'session-instance')
- self.assertEqual(self.session.get_config_variable('foobar'),
- 'session-instance')
+ assert self.session.get_config_variable('foobar') == 'session-instance'
# Can disable this check via the ``methods`` arg.
del self.environ['FOOBAR']
- self.assertEqual(self.session.get_config_variable(
- 'foobar', methods=('env', 'config')), 'default')
+ assert self.session.get_config_variable(
+ 'foobar', methods=('env', 'config')) == 'default'
def test_default_value_can_be_overriden(self):
self.session.session_var_map['foobar'] = (None, 'FOOBAR', 'default',
None)
- self.assertEqual(self.session.get_config_variable('foobar'), 'default')
+ assert self.session.get_config_variable('foobar') == 'default'
def test_can_get_session_vars_info_from_default_session(self):
# This test is to ensure that you can still reach the session_vars_map
# information from the session and that it has the expected value.
self.session = create_session()
- self.assertEqual(self.session.session_var_map['region'],
- ('region', 'AWS_DEFAULT_REGION', None, None))
- self.assertEqual(
- self.session.session_var_map['profile'],
- (None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None))
- self.assertEqual(
- self.session.session_var_map['data_path'],
- ('data_path', 'AWS_DATA_PATH', None, None))
- self.assertEqual(
- self.session.session_var_map['config_file'],
- (None, 'AWS_CONFIG_FILE', '~/.aws/config', None))
- self.assertEqual(
- self.session.session_var_map['ca_bundle'],
- ('ca_bundle', 'AWS_CA_BUNDLE', None, None))
- self.assertEqual(
- self.session.session_var_map['api_versions'],
- ('api_versions', None, {}, None))
- self.assertEqual(
- self.session.session_var_map['credentials_file'],
- (None, 'AWS_SHARED_CREDENTIALS_FILE', '~/.aws/credentials', None))
- self.assertEqual(
- self.session.session_var_map['metadata_service_timeout'],
- ('metadata_service_timeout',
- 'AWS_METADATA_SERVICE_TIMEOUT', 1, int))
- self.assertEqual(
- self.session.session_var_map['metadata_service_num_attempts'],
- ('metadata_service_num_attempts',
- 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int))
- self.assertEqual(
- self.session.session_var_map['parameter_validation'],
- ('parameter_validation', None, True, None))
+ assert self.session.session_var_map['region'] == (
+ 'region', 'AWS_DEFAULT_REGION', None, None)
+ assert self.session.session_var_map['profile'] == (
+ None, ['AWS_DEFAULT_PROFILE', 'AWS_PROFILE'], None, None)
+ assert self.session.session_var_map['data_path'] == (
+ 'data_path', 'AWS_DATA_PATH', None, None)
+ assert self.session.session_var_map['config_file'] == (
+ None, 'AWS_CONFIG_FILE', '~/.aws/config', None)
+ assert self.session.session_var_map['ca_bundle'] == (
+ 'ca_bundle', 'AWS_CA_BUNDLE', None, None)
+ assert self.session.session_var_map['api_versions'] == (
+ 'api_versions', None, {}, None)
+ assert self.session.session_var_map['credentials_file'] == (
+ None, 'AWS_SHARED_CREDENTIALS_FILE', '~/.aws/credentials', None)
+ assert self.session.session_var_map['metadata_service_timeout'] == (
+ 'metadata_service_timeout',
+ 'AWS_METADATA_SERVICE_TIMEOUT', 1, int)
+ assert self.session.session_var_map['metadata_service_num_attempts'] == (
+ 'metadata_service_num_attempts',
+ 'AWS_METADATA_SERVICE_NUM_ATTEMPTS', 1, int)
+ assert self.session.session_var_map['parameter_validation'] == (
+ 'parameter_validation', None, True, None)
class TestSessionPartitionFiles(BaseSessionTest):
@@ -369,7 +357,7 @@ class TestSessionPartitionFiles(BaseSessionTest):
mock_resolver.get_available_partitions.return_value = ['foo']
self.session._register_internal_component(
'endpoint_resolver', mock_resolver)
- self.assertEqual(['foo'], self.session.get_available_partitions())
+ assert self.session.get_available_partitions() == ['foo']
def test_proxies_list_endpoints_to_resolver(self):
resolver = mock.Mock()
@@ -380,36 +368,35 @@ class TestSessionPartitionFiles(BaseSessionTest):
def test_provides_empty_list_for_unknown_service_regions(self):
regions = self.session.get_available_regions('__foo__')
- self.assertEqual([], regions)
+ assert regions == []
class TestSessionUserAgent(BaseSessionTest):
def test_can_change_user_agent_name(self):
self.session.user_agent_name = 'something-else'
- self.assertTrue(self.session.user_agent().startswith('something-else'))
+ assert self.session.user_agent().startswith('something-else')
def test_can_change_user_agent_version(self):
self.session.user_agent_version = '24.0'
- self.assertTrue(self.session.user_agent().startswith('Botocore/24.0'))
+ assert self.session.user_agent().startswith('Botocore/24.0')
def test_can_append_to_user_agent(self):
self.session.user_agent_extra = 'custom-thing/other'
- self.assertTrue(
- self.session.user_agent().endswith('custom-thing/other'))
+ assert self.session.user_agent().endswith('custom-thing/other')
def test_execution_env_not_set(self):
- self.assertFalse(self.session.user_agent().endswith('FooEnv'))
+ assert not self.session.user_agent().endswith('FooEnv')
def test_execution_env_set(self):
self.environ['AWS_EXECUTION_ENV'] = 'FooEnv'
- self.assertTrue(self.session.user_agent().endswith(' exec-env/FooEnv'))
+ assert self.session.user_agent().endswith(' exec-env/FooEnv')
def test_agent_extra_and_exec_env(self):
self.session.user_agent_extra = 'custom-thing/other'
self.environ['AWS_EXECUTION_ENV'] = 'FooEnv'
user_agent = self.session.user_agent()
- self.assertTrue(user_agent.endswith('custom-thing/other'))
- self.assertIn('exec-env/FooEnv', user_agent)
+ assert user_agent.endswith('custom-thing/other')
+ assert 'exec-env/FooEnv' in user_agent
class TestConfigLoaderObject(BaseSessionTest):
@@ -423,9 +410,9 @@ class TestConfigLoaderObject(BaseSessionTest):
session.set_config_variable('credentials_file', f.name)
# Now trying to retrieve the scoped config should pull in
# values from the shared credentials file.
- self.assertEqual(session.get_scoped_config(),
- {'aws_access_key_id': 'a',
- 'aws_secret_access_key': 'b'})
+ assert session.get_scoped_config() == {
+ 'aws_access_key_id': 'a',
+ 'aws_secret_access_key': 'b'}
class TestGetServiceModel(BaseSessionTest):
@@ -436,8 +423,8 @@ class TestGetServiceModel(BaseSessionTest):
}
self.session.register_component('data_loader', loader)
model = self.session.get_service_model('made_up')
- self.assertIsInstance(model, ServiceModel)
- self.assertEqual(model.service_name, 'made_up')
+ assert isinstance(model, ServiceModel)
+ assert model.service_name == 'made_up'
class TestGetPaginatorModel(BaseSessionTest):
@@ -449,7 +436,7 @@ class TestGetPaginatorModel(BaseSessionTest):
model = self.session.get_paginator_model('foo')
# Verify we get a PaginatorModel back
- self.assertIsInstance(model, PaginatorModel)
+ assert isinstance(model, PaginatorModel)
# Verify we called the loader correctly.
loader.load_service_model.assert_called_with(
'foo', 'paginators-1', None)
@@ -464,8 +451,8 @@ class TestGetWaiterModel(BaseSessionTest):
model = self.session.get_waiter_model('foo')
# Verify we (1) get the expected return data,
- self.assertIsInstance(model, WaiterModel)
- self.assertEqual(model.waiter_names, [])
+ assert isinstance(model, WaiterModel)
+ assert model.waiter_names == []
# and (2) call the loader correctly.
loader.load_service_model.assert_called_with(
'foo', 'waiters-2', None)
@@ -474,7 +461,7 @@ class TestGetWaiterModel(BaseSessionTest):
class TestCreateClient(BaseSessionTest):
def test_can_create_client(self):
sts_client = self.session.create_client('sts', 'us-west-2')
- self.assertIsInstance(sts_client, client.BaseClient)
+ assert isinstance(sts_client, client.BaseClient)
def test_credential_provider_not_called_when_creds_provided(self):
cred_provider = mock.Mock()
@@ -485,19 +472,19 @@ class TestCreateClient(BaseSessionTest):
aws_access_key_id='foo',
aws_secret_access_key='bar',
aws_session_token='baz')
- self.assertFalse(cred_provider.load_credentials.called,
- "Credential provider was called even though "
- "explicit credentials were provided to the "
- "create_client call.")
+ message = ("Credential provider was called even though"
+ "explicit credentials were provided to the "
+ "create_client call.")
+ assert not cred_provider.load_credentials.called, message
def test_cred_provider_called_when_partial_creds_provided(self):
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
self.session.create_client(
'sts', 'us-west-2',
aws_access_key_id='foo',
aws_secret_access_key=None
)
- with self.assertRaises(botocore.exceptions.PartialCredentialsError):
+ with pytest.raises(botocore.exceptions.PartialCredentialsError):
self.session.create_client(
'sts', 'us-west-2',
aws_access_key_id=None,
@@ -507,7 +494,7 @@ class TestCreateClient(BaseSessionTest):
@mock.patch('botocore.client.ClientCreator')
def test_config_passed_to_client_creator(self, client_creator):
# Make sure there is no default set
- self.assertEqual(self.session.get_default_client_config(), None)
+ assert self.session.get_default_client_config() is None
# The config passed to the client should be the one that is used
# in creating the client.
@@ -543,37 +530,37 @@ class TestCreateClient(BaseSessionTest):
client_creator.return_value.create_client.call_args[1][
'client_config'])
# Check that the client configs were merged
- self.assertEqual(used_client_config.region_name, 'us-east-1')
+ assert used_client_config.region_name == 'us-east-1'
# Make sure that the client config used is not the default client
# config or the one passed in. It should be a new config.
- self.assertIsNot(used_client_config, config)
- self.assertIsNot(used_client_config, other_config)
+ assert used_client_config is not config
+ assert used_client_config is not other_config
def test_create_client_with_region(self):
ec2_client = self.session.create_client(
'ec2', 'us-west-2')
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
def test_create_client_with_region_and_client_config(self):
config = botocore.config.Config()
# Use a client config with no region configured.
ec2_client = self.session.create_client(
'ec2', region_name='us-west-2', config=config)
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
# If the region name is changed, it should not change the
# region of the client
config.region_name = 'us-east-1'
- self.assertEqual(ec2_client.meta.region_name, 'us-west-2')
+ assert ec2_client.meta.region_name == 'us-west-2'
# Now make a new client with the updated client config.
ec2_client = self.session.create_client(
'ec2', config=config)
- self.assertEqual(ec2_client.meta.region_name, 'us-east-1')
+ assert ec2_client.meta.region_name == 'us-east-1'
def test_create_client_no_region_and_no_client_config(self):
ec2_client = self.session.create_client('ec2')
- self.assertEqual(ec2_client.meta.region_name, 'us-west-11')
+ assert ec2_client.meta.region_name == 'us-west-11'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_ca_bundle_from_config(self, client_creator):
@@ -588,21 +575,21 @@ class TestCreateClient(BaseSessionTest):
self.session.create_client('ec2', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'config-certs.pem')
+ assert call_kwargs['verify'] == 'config-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_ca_bundle_from_env_var(self, client_creator):
self.environ['FOO_AWS_CA_BUNDLE'] = 'env-certs.pem'
self.session.create_client('ec2', 'us-west-2')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'env-certs.pem')
+ assert call_kwargs['verify'] == 'env-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_with_verify_param(self, client_creator):
self.session.create_client(
'ec2', 'us-west-2', verify='verify-certs.pem')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['verify'], 'verify-certs.pem')
+ assert call_kwargs['verify'] == 'verify-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_verify_param_overrides_all(self, client_creator):
@@ -625,13 +612,13 @@ class TestCreateClient(BaseSessionTest):
create_client.call_args[1]
# The verify parameter should override all the other
# configurations
- self.assertEqual(call_kwargs['verify'], 'verify-certs.pem')
+ assert call_kwargs['verify'] == 'verify-certs.pem'
@mock.patch('botocore.client.ClientCreator')
def test_create_client_use_no_api_version_by_default(self, client_creator):
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], None)
+ assert call_kwargs['api_version'] is None
@mock.patch('botocore.client.ClientCreator')
def test_create_client_uses_api_version_from_config(self, client_creator):
@@ -648,7 +635,7 @@ class TestCreateClient(BaseSessionTest):
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], config_api_version)
+ assert call_kwargs['api_version'] == config_api_version
@mock.patch('botocore.client.ClientCreator')
def test_can_specify_multiple_versions_from_config(self, client_creator):
@@ -663,19 +650,18 @@ class TestCreateClient(BaseSessionTest):
' myservice = %s\n'
' myservice2 = %s\n' % (
config_api_version, second_config_api_version)
- )
+ )
f.flush()
self.session.create_client('myservice', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], config_api_version)
+ assert call_kwargs['api_version'] == config_api_version
self.session.create_client('myservice2', 'us-west-2')
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(
- call_kwargs['api_version'], second_config_api_version)
+ assert call_kwargs['api_version'] == second_config_api_version
@mock.patch('botocore.client.ClientCreator')
def test_param_api_version_overrides_config_value(self, client_creator):
@@ -694,31 +680,28 @@ class TestCreateClient(BaseSessionTest):
'myservice', 'us-west-2', api_version=override_api_version)
call_kwargs = client_creator.return_value.\
create_client.call_args[1]
- self.assertEqual(call_kwargs['api_version'], override_api_version)
+ assert call_kwargs['api_version'] == override_api_version
class TestSessionComponent(BaseSessionTest):
def test_internal_component(self):
component = object()
self.session._register_internal_component('internal', component)
- self.assertIs(
- self.session._get_internal_component('internal'), component)
- with self.assertRaises(ValueError):
+ assert self.session._get_internal_component('internal') is component
+ with pytest.raises(ValueError):
self.session.get_component('internal')
def test_internal_endpoint_resolver_is_same_as_deprecated_public(self):
endpoint_resolver = self.session._get_internal_component(
'endpoint_resolver')
- self.assertIs(
- self.session.get_component('endpoint_resolver'), endpoint_resolver)
+ assert self.session.get_component(
+ 'endpoint_resolver') is endpoint_resolver
def test_internal_exceptions_factory_is_same_as_deprecated_public(self):
exceptions_factory = self.session._get_internal_component(
'exceptions_factory')
- self.assertIs(
- self.session.get_component('exceptions_factory'),
- exceptions_factory
- )
+ assert self.session.get_component(
+ 'exceptions_factory') is exceptions_factory
class TestComponentLocator(unittest.TestCase):
@@ -726,42 +709,42 @@ class TestComponentLocator(unittest.TestCase):
self.components = botocore.session.ComponentLocator()
def test_unknown_component_raises_exception(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.components.get_component('unknown-component')
def test_can_register_and_retrieve_component(self):
component = object()
self.components.register_component('foo', component)
- self.assertIs(self.components.get_component('foo'), component)
+ assert self.components.get_component('foo') is component
def test_last_registration_wins(self):
first = object()
second = object()
self.components.register_component('foo', first)
self.components.register_component('foo', second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_can_lazy_register_a_component(self):
component = object()
- lazy = lambda: component
+ def lazy(): return component
self.components.lazy_register_component('foo', lazy)
- self.assertIs(self.components.get_component('foo'), component)
+ assert self.components.get_component('foo') is component
def test_latest_registration_wins_even_if_lazy(self):
first = object()
second = object()
- lazy_second = lambda: second
+ def lazy_second(): return second
self.components.register_component('foo', first)
self.components.lazy_register_component('foo', lazy_second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_latest_registration_overrides_lazy(self):
first = object()
second = object()
- lazy_first = lambda: first
+ def lazy_first(): return first
self.components.lazy_register_component('foo', lazy_first)
self.components.register_component('foo', second)
- self.assertIs(self.components.get_component('foo'), second)
+ assert self.components.get_component('foo') is second
def test_lazy_registration_factory_does_not_remove_from_list_on_error(self):
class ArbitraryError(Exception):
@@ -772,20 +755,20 @@ class TestComponentLocator(unittest.TestCase):
self.components.lazy_register_component('foo', bad_factory)
- with self.assertRaises(ArbitraryError):
+ with pytest.raises(ArbitraryError):
self.components.get_component('foo')
# Trying again should raise the same exception,
# not an ValueError("Unknown component")
- with self.assertRaises(ArbitraryError):
+ with pytest.raises(ArbitraryError):
self.components.get_component('foo')
class TestDefaultClientConfig(BaseSessionTest):
def test_new_session_has_no_default_client_config(self):
- self.assertEqual(self.session.get_default_client_config(), None)
+ assert self.session.get_default_client_config() is None
def test_set_and_get_client_config(self):
client_config = botocore.config.Config()
self.session.set_default_client_config(client_config)
- self.assertIs(self.session.get_default_client_config(), client_config)
+ assert self.session.get_default_client_config() is client_config
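
The remaining hunks apply the same mechanical translation: unittest/nose helper assertions become bare assert statements and self.assertRaises becomes pytest.raises. A minimal, self-contained sketch of the pattern (parse_port is a hypothetical helper used only for illustration, not part of botocore):

    import unittest
    import pytest

    def parse_port(value):                      # hypothetical helper
        if not value.isdigit():
            raise ValueError(value)
        return int(value)

    class OldStyle(unittest.TestCase):          # unittest/nose idiom being removed
        def test_parse(self):
            self.assertEqual(parse_port('443'), 443)
            with self.assertRaises(ValueError):
                parse_port('not-a-port')

    class NewStyle(unittest.TestCase):          # plain asserts, as in this patch
        def test_parse(self):
            assert parse_port('443') == 443
            with pytest.raises(ValueError):
                parse_port('not-a-port')

pytest rewrites plain assert statements at collection time, so failing comparisons still report both values without the unittest helper methods.
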
diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py
index 16b06f76..5eeb715e 100644
--- a/tests/unit/test_signers.py
+++ b/tests/unit/test_signers.py
@@ -13,6 +13,7 @@
from tests import mock
import datetime
import json
+import pytest
from dateutil.tz import tzutc
@@ -50,13 +51,13 @@ class BaseSignerTest(unittest.TestCase):
class TestSigner(BaseSignerTest):
def test_region_name(self):
- self.assertEqual(self.signer.region_name, 'region_name')
+ assert self.signer.region_name == 'region_name'
def test_signature_version(self):
- self.assertEqual(self.signer.signature_version, 'v4')
+ assert self.signer.signature_version == 'v4'
def test_signing_name(self):
- self.assertEqual(self.signer.signing_name, 'signing_name')
+ assert self.signer.signing_name == 'signing_name'
def test_region_required_for_sigv4(self):
self.signer = RequestSigner(
@@ -64,7 +65,7 @@ class TestSigner(BaseSignerTest):
self.credentials, self.emitter
)
- with self.assertRaises(NoRegionError):
+ with pytest.raises(NoRegionError):
self.signer.sign('operation_name', self.request)
def test_get_auth(self):
@@ -73,7 +74,7 @@ class TestSigner(BaseSignerTest):
{'v4': auth_cls}):
auth = self.signer.get_auth('service_name', 'region_name')
- self.assertEqual(auth, auth_cls.return_value)
+ assert auth == auth_cls.return_value
auth_cls.assert_called_with(
credentials=self.fixed_credentials,
service_name='service_name',
@@ -86,14 +87,14 @@ class TestSigner(BaseSignerTest):
auth = self.signer.get_auth(
'service_name', 'region_name', signature_version='v4-custom')
- self.assertEqual(auth, auth_cls.return_value)
+ assert auth == auth_cls.return_value
auth_cls.assert_called_with(
credentials=self.fixed_credentials,
service_name='service_name',
region_name='region_name')
def test_get_auth_bad_override(self):
- with self.assertRaises(UnknownSignatureVersionError):
+ with pytest.raises(UnknownSignatureVersionError):
self.signer.get_auth('service_name', 'region_name',
signature_version='bad')
@@ -208,7 +209,7 @@ class TestSigner(BaseSignerTest):
url = self.signer.generate_presigned_url(
request_dict, 'operation_name')
- self.assertEqual(url, 'https://foo.com')
+ assert url == 'https://foo.com'
def test_generate_presigned_url(self):
auth = mock.Mock()
@@ -229,7 +230,7 @@ class TestSigner(BaseSignerTest):
auth.assert_called_with(
credentials=self.fixed_credentials, region_name='region_name',
service_name='signing_name', expires=3600)
- self.assertEqual(presigned_url, 'https://foo.com')
+ assert presigned_url == 'https://foo.com'
def test_generate_presigned_url_with_region_override(self):
auth = mock.Mock()
@@ -251,7 +252,7 @@ class TestSigner(BaseSignerTest):
auth.assert_called_with(
credentials=self.fixed_credentials, region_name='us-west-2',
service_name='signing_name', expires=3600)
- self.assertEqual(presigned_url, 'https://foo.com')
+ assert presigned_url == 'https://foo.com'
def test_generate_presigned_url_with_exipres_in(self):
auth = mock.Mock()
@@ -273,7 +274,7 @@ class TestSigner(BaseSignerTest):
credentials=self.fixed_credentials,
region_name='region_name',
expires=900, service_name='signing_name')
- self.assertEqual(presigned_url, 'https://foo.com')
+ assert presigned_url == 'https://foo.com'
def test_presigned_url_throws_unsupported_signature_error(self):
request_dict = {
@@ -287,7 +288,7 @@ class TestSigner(BaseSignerTest):
self.signer = RequestSigner(
ServiceId('service_name'), 'region_name', 'signing_name',
'foo', self.credentials, self.emitter)
- with self.assertRaises(UnsupportedSignatureVersionError):
+ with pytest.raises(UnsupportedSignatureVersionError):
self.signer.generate_presigned_url(
request_dict, operation_name='foo')
@@ -305,7 +306,7 @@ class TestSigner(BaseSignerTest):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS,
{'v4': auth_cls}):
auth = self.signer.get_auth('service_name', 'region_name')
- self.assertEqual(auth, auth_cls.return_value)
+ assert auth == auth_cls.return_value
# Note we're called with 'foo', 'bar', 'baz', and *not*
# 'a', 'b', 'c'.
auth_cls.assert_called_with(
@@ -344,8 +345,8 @@ class TestSigner(BaseSignerTest):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
self.signer.sign('operation_name', self.request,
signing_type='standard')
- self.assertFalse(post_auth.called)
- self.assertFalse(query_auth.called)
+ assert not post_auth.called
+ assert not query_auth.called
auth.assert_called_with(
credentials=ReadOnlyCredentials('key', 'secret', None),
service_name='signing_name',
@@ -364,8 +365,8 @@ class TestSigner(BaseSignerTest):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
self.signer.sign('operation_name', self.request,
signing_type='presign-url')
- self.assertFalse(post_auth.called)
- self.assertFalse(auth.called)
+ assert not post_auth.called
+ assert not auth.called
query_auth.assert_called_with(
credentials=ReadOnlyCredentials('key', 'secret', None),
service_name='signing_name',
@@ -384,8 +385,8 @@ class TestSigner(BaseSignerTest):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
self.signer.sign('operation_name', self.request,
signing_type='presign-post')
- self.assertFalse(auth.called)
- self.assertFalse(query_auth.called)
+ assert not auth.called
+ assert not query_auth.called
post_auth.assert_called_with(
credentials=ReadOnlyCredentials('key', 'secret', None),
service_name='signing_name',
@@ -497,7 +498,7 @@ class TestSigner(BaseSignerTest):
credentials=self.fixed_credentials,
region_name='region_name',
expires=3600, service_name='foo')
- self.assertEqual(presigned_url, 'https://foo.com')
+ assert presigned_url == 'https://foo.com'
def test_unknown_signer_raises_unknown_on_standard(self):
auth = mock.Mock()
@@ -506,7 +507,7 @@ class TestSigner(BaseSignerTest):
}
self.emitter.emit_until_response.return_value = (None, 'custom')
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
- with self.assertRaises(UnknownSignatureVersionError):
+ with pytest.raises(UnknownSignatureVersionError):
self.signer.sign('operation_name', self.request,
signing_type='standard')
@@ -517,11 +518,11 @@ class TestSigner(BaseSignerTest):
}
self.emitter.emit_until_response.return_value = (None, 'custom')
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
- with self.assertRaises(UnsupportedSignatureVersionError):
+ with pytest.raises(UnsupportedSignatureVersionError):
self.signer.sign('operation_name', self.request,
signing_type='presign-url')
- with self.assertRaises(UnsupportedSignatureVersionError):
+ with pytest.raises(UnsupportedSignatureVersionError):
self.signer.sign('operation_name', self.request,
signing_type='presign-post')
@@ -539,8 +540,8 @@ class TestCloudfrontSigner(BaseSignerTest):
expected = (
'{"Statement":[{"Resource":"foo",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":1451606400}}}]}')
- self.assertEqual(json.loads(policy), json.loads(expected))
- self.assertEqual(policy, expected) # This is to ensure the right order
+ assert json.loads(policy) == json.loads(expected)
+ assert policy == expected # This is to ensure the right order
def test_build_custom_policy(self):
policy = self.signer.build_policy(
@@ -557,7 +558,7 @@ class TestCloudfrontSigner(BaseSignerTest):
},
}]
}
- self.assertEqual(json.loads(policy), expected)
+ assert json.loads(policy) == expected
def test_generate_presign_url_with_expire_time(self):
signed_url = self.signer.generate_presigned_url(
@@ -626,15 +627,14 @@ class TestS3PostPresigner(BaseSignerTest):
self.auth.assert_called_with(
credentials=self.fixed_credentials, region_name='region_name',
service_name='signing_name')
- self.assertEqual(self.add_auth.call_count, 1)
+ assert self.add_auth.call_count == 1
ref_request = self.add_auth.call_args[0][0]
ref_policy = ref_request.context['s3-presign-post-policy']
- self.assertEqual(ref_policy['expiration'], '2014-03-10T18:02:55Z')
- self.assertEqual(ref_policy['conditions'], [])
+ assert ref_policy['expiration'] == '2014-03-10T18:02:55Z'
+ assert ref_policy['conditions'] == []
- self.assertEqual(post_form_args['url'],
- 'https://s3.amazonaws.com/mybucket')
- self.assertEqual(post_form_args['fields'], {})
+ assert post_form_args['url'] == 'https://s3.amazonaws.com/mybucket'
+ assert post_form_args['fields'] == {}
def test_generate_presigned_post_emits_choose_signer(self):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS,
@@ -680,7 +680,7 @@ class TestS3PostPresigner(BaseSignerTest):
's3-query': auth
}
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS, auth_types):
- with self.assertRaises(UnsupportedSignatureVersionError):
+ with pytest.raises(UnsupportedSignatureVersionError):
self.signer.generate_presigned_post(self.request_dict)
def test_generate_unsigned_post(self):
@@ -691,7 +691,7 @@ class TestS3PostPresigner(BaseSignerTest):
post_form_args = self.signer.generate_presigned_post(
self.request_dict)
expected = {'fields': {}, 'url': 'https://s3.amazonaws.com/mybucket'}
- self.assertEqual(post_form_args, expected)
+ assert post_form_args == expected
def test_generate_presigned_post_with_conditions(self):
conditions = [
@@ -705,10 +705,10 @@ class TestS3PostPresigner(BaseSignerTest):
self.auth.assert_called_with(
credentials=self.fixed_credentials, region_name='region_name',
service_name='signing_name')
- self.assertEqual(self.add_auth.call_count, 1)
+ assert self.add_auth.call_count == 1
ref_request = self.add_auth.call_args[0][0]
ref_policy = ref_request.context['s3-presign-post-policy']
- self.assertEqual(ref_policy['conditions'], conditions)
+ assert ref_policy['conditions'] == conditions
def test_generate_presigned_post_with_region_override(self):
with mock.patch.dict(botocore.auth.AUTH_TYPE_MAPS,
@@ -732,7 +732,7 @@ class TestS3PostPresigner(BaseSignerTest):
ServiceId('service_name'), 'region_name', 'signing_name',
'foo', self.credentials, self.emitter)
self.signer = S3PostPresigner(self.request_signer)
- with self.assertRaises(UnsupportedSignatureVersionError):
+ with pytest.raises(UnsupportedSignatureVersionError):
self.signer.generate_presigned_post(request_dict)
@@ -792,11 +792,11 @@ class TestGenerateUrl(unittest.TestCase):
operation_name='GetObject')
def test_generate_presigned_url_unknown_method_name(self):
- with self.assertRaises(UnknownClientMethodError):
+ with pytest.raises(UnknownClientMethodError):
self.client.generate_presigned_url('getobject')
def test_generate_presigned_url_missing_required_params(self):
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.client.generate_presigned_url('get_object')
def test_generate_presigned_url_expires(self):
@@ -840,13 +840,10 @@ class TestGenerateUrl(unittest.TestCase):
events_emitted = [
emit_call[0][0] for emit_call in emitter.emit.call_args_list
]
- self.assertEqual(
- events_emitted,
- [
+ assert events_emitted == [
'provide-client-params.s3.GetObject',
'before-parameter-build.s3.GetObject'
]
- )
def test_generate_presign_url_emits_is_presign_in_context(self):
emitter = mock.Mock(HierarchicalEmitter)
@@ -858,11 +855,9 @@ class TestGenerateUrl(unittest.TestCase):
emit_call[1] for emit_call in emitter.emit.call_args_list
]
for kwargs in kwargs_emitted:
- self.assertTrue(
- kwargs.get('context', {}).get('is_presign_request'),
+ assert kwargs.get('context', {}).get('is_presign_request'), (
'The context did not have is_presign_request set to True for '
- 'the following kwargs emitted: %s' % kwargs
- )
+ 'the following kwargs emitted: %s' % kwargs)
class TestGeneratePresignedPost(unittest.TestCase):
@@ -885,15 +880,11 @@ class TestGeneratePresignedPost(unittest.TestCase):
request_dict = post_kwargs['request_dict']
fields = post_kwargs['fields']
conditions = post_kwargs['conditions']
- self.assertEqual(
- request_dict['url'], 'https://s3.amazonaws.com/mybucket')
- self.assertEqual(post_kwargs['expires_in'], 3600)
- self.assertEqual(
- conditions,
- [{'bucket': 'mybucket'}, {'key': 'mykey'}])
- self.assertEqual(
- fields,
- {'key': 'mykey'})
+ assert request_dict['url'] == 'https://s3.amazonaws.com/mybucket'
+ assert post_kwargs['expires_in'] == 3600
+ assert conditions == [
+ {'bucket': 'mybucket'}, {'key': 'mykey'}]
+ assert fields == {'key': 'mykey'}
def test_generate_presigned_post_with_filename(self):
self.key = 'myprefix/${filename}'
@@ -903,15 +894,11 @@ class TestGeneratePresignedPost(unittest.TestCase):
request_dict = post_kwargs['request_dict']
fields = post_kwargs['fields']
conditions = post_kwargs['conditions']
- self.assertEqual(
- request_dict['url'], 'https://s3.amazonaws.com/mybucket')
- self.assertEqual(post_kwargs['expires_in'], 3600)
- self.assertEqual(
- conditions,
- [{'bucket': 'mybucket'}, ['starts-with', '$key', 'myprefix/']])
- self.assertEqual(
- fields,
- {'key': 'myprefix/${filename}'})
+ assert request_dict['url'] == 'https://s3.amazonaws.com/mybucket'
+ assert post_kwargs['expires_in'] == 3600
+ assert conditions == [
+ {'bucket': 'mybucket'}, ['starts-with', '$key', 'myprefix/']]
+ assert fields == {'key': 'myprefix/${filename}'}
def test_generate_presigned_post_expires(self):
self.client.generate_presigned_post(
@@ -920,15 +907,11 @@ class TestGeneratePresignedPost(unittest.TestCase):
request_dict = post_kwargs['request_dict']
fields = post_kwargs['fields']
conditions = post_kwargs['conditions']
- self.assertEqual(
- request_dict['url'], 'https://s3.amazonaws.com/mybucket')
- self.assertEqual(post_kwargs['expires_in'], 50)
- self.assertEqual(
- conditions,
- [{'bucket': 'mybucket'}, {'key': 'mykey'}])
- self.assertEqual(
- fields,
- {'key': 'mykey'})
+ assert request_dict['url'] == 'https://s3.amazonaws.com/mybucket'
+ assert post_kwargs['expires_in'] == 50
+ assert conditions == [
+ {'bucket': 'mybucket'}, {'key': 'mykey'}]
+ assert fields == {'key': 'mykey'}
def test_generate_presigned_post_with_prefilled(self):
conditions = [{'acl': 'public-read'}]
@@ -937,24 +920,22 @@ class TestGeneratePresignedPost(unittest.TestCase):
self.client.generate_presigned_post(
self.bucket, self.key, Fields=fields, Conditions=conditions)
- self.assertEqual(fields, {'acl': 'public-read'})
+ assert fields == {'acl': 'public-read'}
_, post_kwargs = self.presign_post_mock.call_args
request_dict = post_kwargs['request_dict']
fields = post_kwargs['fields']
conditions = post_kwargs['conditions']
- self.assertEqual(
- request_dict['url'], 'https://s3.amazonaws.com/mybucket')
- self.assertEqual(
- conditions,
- [{'acl': 'public-read'}, {'bucket': 'mybucket'}, {'key': 'mykey'}])
- self.assertEqual(fields['acl'], 'public-read')
- self.assertEqual(
- fields, {'key': 'mykey', 'acl': 'public-read'})
+ assert request_dict['url'] == 'https://s3.amazonaws.com/mybucket'
+ assert conditions == [
+ {'acl': 'public-read'}, {'bucket': 'mybucket'}, {'key': 'mykey'}]
+
+ assert fields['acl'] == 'public-read'
+ assert fields == {'key': 'mykey', 'acl': 'public-read'}
def test_generate_presigned_post_non_s3_client(self):
self.client = self.session.create_client('ec2', 'us-west-2')
- with self.assertRaises(AttributeError):
+ with pytest.raises(AttributeError):
self.client.generate_presigned_post()
@@ -1001,7 +982,7 @@ class TestGenerateDBAuthToken(BaseSignerTest):
result = generate_db_auth_token(
self.client, hostname, port, username, Region=region)
- self.assertIn(region, result)
+ assert region in result
# The hostname won't be changed even if a different region is specified
- self.assertIn(hostname, result)
+ assert hostname in result
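
Where a converted test also needs to inspect the raised exception, pytest.raises exposes it through the as clause. This is a general pytest idiom sketched here with a hypothetical function; the signer tests above do not need it:

    import pytest

    def sign(version):                          # hypothetical, for illustration only
        raise ValueError('unsupported signature version: %s' % version)

    def test_unsupported_version_message():
        with pytest.raises(ValueError) as excinfo:
            sign('bad')
        # excinfo.value is the raised exception instance
        assert 'unsupported signature version' in str(excinfo.value)
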
diff --git a/tests/unit/test_stub.py b/tests/unit/test_stub.py
index eb276562..7062a53c 100644
--- a/tests/unit/test_stub.py
+++ b/tests/unit/test_stub.py
@@ -13,6 +13,7 @@
from tests import unittest
from tests import mock
+import pytest
from botocore.stub import Stubber
from botocore.exceptions import ParamValidationError, StubResponseError, UnStubbedResponseError
@@ -89,17 +90,17 @@ class TestStubber(unittest.TestCase):
response = {'foo': 'bar'}
self.stubber.add_response('foo', response)
- with self.assertRaises(AssertionError):
+ with pytest.raises(AssertionError):
self.stubber.assert_no_pending_responses()
def test_add_response_fails_when_missing_client_method(self):
del self.client.foo
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
self.stubber.add_response('foo', {})
def test_validates_service_response(self):
self.stubber.add_response('foo', {})
- self.assertTrue(self.validate_parameters_mock.called)
+ assert self.validate_parameters_mock.called
def test_validate_ignores_response_metadata(self):
service_response = {'ResponseMetadata': {'foo': 'bar'}}
@@ -125,8 +126,7 @@ class TestStubber(unittest.TestCase):
{}, output_shape)
# Make sure service response hasn't been mutated
- self.assertEqual(
- service_response, {'ResponseMetadata': {'foo': 'bar'}})
+ assert service_response == {'ResponseMetadata': {'foo': 'bar'}}
def test_validates_on_empty_output_shape(self):
service_model = ServiceModel({
@@ -139,7 +139,7 @@ class TestStubber(unittest.TestCase):
})
self.client.meta.service_model = service_model
- with self.assertRaises(ParamValidationError):
+ with pytest.raises(ParamValidationError):
self.stubber.add_response('TestOperation', {'foo': 'bar'})
def test_get_response(self):
@@ -147,8 +147,8 @@ class TestStubber(unittest.TestCase):
self.stubber.add_response('foo', service_response)
self.stubber.activate()
response = self.emit_get_response_event()
- self.assertEqual(response[1], service_response)
- self.assertEqual(response[0].status_code, 200)
+ assert response[1] == service_response
+ assert response[0].status_code == 200
def test_get_client_error_response(self):
error_code = "foo"
@@ -156,8 +156,8 @@ class TestStubber(unittest.TestCase):
self.stubber.add_client_error('foo', error_code, service_message)
self.stubber.activate()
response = self.emit_get_response_event()
- self.assertEqual(response[1]['Error']['Message'], service_message)
- self.assertEqual(response[1]['Error']['Code'], error_code)
+ assert response[1]['Error']['Message'] == service_message
+ assert response[1]['Error']['Code'] == error_code
def test_get_client_error_with_extra_error_meta(self):
error_code = "foo"
@@ -172,8 +172,8 @@ class TestStubber(unittest.TestCase):
with self.stubber:
response = self.emit_get_response_event()
error = response[1]['Error']
- self.assertIn('Endpoint', error)
- self.assertEqual(error['Endpoint'], "https://foo.bar.baz")
+ assert 'Endpoint' in error
+ assert error['Endpoint'] == "https://foo.bar.baz"
def test_get_client_error_with_extra_response_meta(self):
error_code = "foo"
@@ -188,16 +188,16 @@ class TestStubber(unittest.TestCase):
with self.stubber:
response = self.emit_get_response_event()
actual_response_meta = response[1]['ResponseMetadata']
- self.assertIn('RequestId', actual_response_meta)
- self.assertEqual(actual_response_meta['RequestId'], "79104EXAMPLEB723")
+ assert 'RequestId' in actual_response_meta
+ assert actual_response_meta['RequestId'] == "79104EXAMPLEB723"
def test_get_response_errors_with_no_stubs(self):
self.stubber.activate()
- with self.assertRaises(UnStubbedResponseError):
+ with pytest.raises(UnStubbedResponseError):
self.emit_get_response_event()
def test_assert_no_responses_remaining(self):
self.stubber.add_response('foo', {})
- with self.assertRaises(AssertionError):
+ with pytest.raises(AssertionError):
self.stubber.assert_no_pending_responses()
diff --git a/tests/unit/test_translate.py b/tests/unit/test_translate.py
index 15fbd047..bbe54c4a 100644
--- a/tests/unit/test_translate.py
+++ b/tests/unit/test_translate.py
@@ -52,9 +52,8 @@ class TestBuildRetryConfig(unittest.TestCase):
def test_inject_retry_config(self):
retry = translate.build_retry_config('sts', self.retry['retry'],
self.retry['definitions'])
- self.assertIn('__default__', retry)
- self.assertEqual(
- retry['__default__'], {
+ assert '__default__' in retry
+ assert retry['__default__'] == {
"max_attempts": 5,
"delay": "service_specific_delay",
"policies": {
@@ -63,25 +62,23 @@ class TestBuildRetryConfig(unittest.TestCase):
"service_one": "service",
}
}
- )
# Policies should be merged.
operation_config = retry['AssumeRole']
- self.assertEqual(operation_config['policies']['name'], 'policy')
+ assert operation_config['policies']['name'] == 'policy'
def test_resolve_reference(self):
retry = translate.build_retry_config('sts', self.retry['retry'],
self.retry['definitions'])
operation_config = retry['AssumeRole']
# And we should resolve references.
- self.assertEqual(operation_config['policies']['other'],
- {"from": {"definition": "file"}})
+ assert operation_config['policies']['other'] == {
+ "from": {"definition": "file"}}
def test_service_specific_defaults_no_mutate_default_retry(self):
retry = translate.build_retry_config('sts', self.retry['retry'],
self.retry['definitions'])
# sts has a specific policy
- self.assertEqual(
- retry['__default__'], {
+ assert retry['__default__'] == {
"max_attempts": 5,
"delay": "service_specific_delay",
"policies": {
@@ -90,13 +87,10 @@ class TestBuildRetryConfig(unittest.TestCase):
"service_one": "service",
}
}
- )
# The general defaults for the upstream model should not have been
# mutated from building the retry config
- self.assertEqual(
- self.retry['retry']['__default__'],
- {
+ assert self.retry['retry']['__default__'] == {
"max_attempts": 5,
"delay": "global_delay",
"policies": {
@@ -104,14 +98,12 @@ class TestBuildRetryConfig(unittest.TestCase):
"override_me": "global",
}
}
- )
def test_client_override_max_attempts(self):
retry = translate.build_retry_config(
'sts', self.retry['retry'], self.retry['definitions'],
client_retry_config={'max_attempts': 9}
)
- self.assertEqual(retry['__default__']['max_attempts'], 10)
+ assert retry['__default__']['max_attempts'] == 10
# But it should not mutate the original retry model
- self.assertEqual(
- self.retry['retry']['__default__']['max_attempts'], 5)
+ assert self.retry['retry']['__default__']['max_attempts'] == 5
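
The repetitive equality checks in the utils tests that follow (ensure_boolean, xform_name, and friends) are kept as straight one-to-one conversions in this patch; pytest.mark.parametrize is an alternative way to express such tables of cases, shown below purely as a sketch with a hypothetical stand-in function and not used here:

    import pytest

    def xform(name):                            # hypothetical stand-in, not xform_name
        return name.lower().replace(' ', '_')

    @pytest.mark.parametrize('raw, expected', [
        ('UpperCamelCase', 'uppercamelcase'),
        ('already snake', 'already_snake'),
    ])
    def test_xform(raw, expected):
        assert xform(raw) == expected
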
diff --git a/tests/unit/test_utils.py b/tests/unit/test_utils.py
index 07fad9f8..0466e1c4 100644
--- a/tests/unit/test_utils.py
+++ b/tests/unit/test_utils.py
@@ -16,6 +16,7 @@ from dateutil.tz import tzutc, tzoffset
import datetime
import copy
from tests import mock
+import pytest
import botocore
from botocore import xform_name
@@ -80,26 +81,26 @@ from botocore.config import Config
class TestEnsureBoolean(unittest.TestCase):
def test_boolean_true(self):
- self.assertEqual(ensure_boolean(True), True)
+ assert ensure_boolean(True) is True
def test_boolean_false(self):
- self.assertEqual(ensure_boolean(False), False)
+ assert ensure_boolean(False) is False
def test_string_true(self):
- self.assertEqual(ensure_boolean('True'), True)
+ assert ensure_boolean('True') is True
def test_string_false(self):
- self.assertEqual(ensure_boolean('False'), False)
+ assert ensure_boolean('False') is False
def test_string_lowercase_true(self):
- self.assertEqual(ensure_boolean('true'), True)
+ assert ensure_boolean('true') is True
class TestIsJSONValueHeader(unittest.TestCase):
def test_no_serialization_section(self):
shape = mock.Mock()
shape.type_name = 'string'
- self.assertFalse(is_json_value_header(shape))
+ assert not is_json_value_header(shape)
def test_non_jsonvalue_shape(self):
shape = mock.Mock()
@@ -107,7 +108,7 @@ class TestIsJSONValueHeader(unittest.TestCase):
'location': 'header'
}
shape.type_name = 'string'
- self.assertFalse(is_json_value_header(shape))
+ assert not is_json_value_header(shape)
def test_non_header_jsonvalue_shape(self):
shape = mock.Mock()
@@ -115,7 +116,7 @@ class TestIsJSONValueHeader(unittest.TestCase):
'jsonvalue': True
}
shape.type_name = 'string'
- self.assertFalse(is_json_value_header(shape))
+ assert not is_json_value_header(shape)
def test_non_string_jsonvalue_shape(self):
shape = mock.Mock()
@@ -124,7 +125,7 @@ class TestIsJSONValueHeader(unittest.TestCase):
'jsonvalue': True
}
shape.type_name = 'integer'
- self.assertFalse(is_json_value_header(shape))
+ assert not is_json_value_header(shape)
def test_json_value_header(self):
shape = mock.Mock()
@@ -133,91 +134,83 @@ class TestIsJSONValueHeader(unittest.TestCase):
'location': 'header'
}
shape.type_name = 'string'
- self.assertTrue(is_json_value_header(shape))
+ assert is_json_value_header(shape) is True
class TestURINormalization(unittest.TestCase):
def test_remove_dot_segments(self):
- self.assertEqual(remove_dot_segments('../foo'), 'foo')
- self.assertEqual(remove_dot_segments('../../foo'), 'foo')
- self.assertEqual(remove_dot_segments('./foo'), 'foo')
- self.assertEqual(remove_dot_segments('/./'), '/')
- self.assertEqual(remove_dot_segments('/../'), '/')
- self.assertEqual(remove_dot_segments('/foo/bar/baz/../qux'),
- '/foo/bar/qux')
- self.assertEqual(remove_dot_segments('/foo/..'), '/')
- self.assertEqual(remove_dot_segments('foo/bar/baz'), 'foo/bar/baz')
- self.assertEqual(remove_dot_segments('..'), '')
- self.assertEqual(remove_dot_segments('.'), '')
- self.assertEqual(remove_dot_segments('/.'), '/')
- self.assertEqual(remove_dot_segments('/.foo'), '/.foo')
- self.assertEqual(remove_dot_segments('/..foo'), '/..foo')
- self.assertEqual(remove_dot_segments(''), '')
- self.assertEqual(remove_dot_segments('/a/b/c/./../../g'), '/a/g')
- self.assertEqual(remove_dot_segments('mid/content=5/../6'), 'mid/6')
+ assert remove_dot_segments('../foo') == 'foo'
+ assert remove_dot_segments('../../foo') == 'foo'
+ assert remove_dot_segments('./foo') == 'foo'
+ assert remove_dot_segments('/./') == '/'
+ assert remove_dot_segments('/../') == '/'
+ assert remove_dot_segments('/foo/bar/baz/../qux') == '/foo/bar/qux'
+ assert remove_dot_segments('/foo/..') == '/'
+ assert remove_dot_segments('foo/bar/baz') == 'foo/bar/baz'
+ assert remove_dot_segments('..') == ''
+ assert remove_dot_segments('.') == ''
+ assert remove_dot_segments('/.') == '/'
+ assert remove_dot_segments('/.foo') == '/.foo'
+ assert remove_dot_segments('/..foo') == '/..foo'
+ assert remove_dot_segments('') == ''
+ assert remove_dot_segments('/a/b/c/./../../g') == '/a/g'
+ assert remove_dot_segments('mid/content=5/../6') == 'mid/6'
# I don't think this is RFC compliant...
- self.assertEqual(remove_dot_segments('//foo//'), '/foo/')
+ assert remove_dot_segments('//foo//') == '/foo/'
def test_empty_url_normalization(self):
- self.assertEqual(normalize_url_path(''), '/')
+ assert normalize_url_path('') == '/'
class TestTransformName(unittest.TestCase):
def test_upper_camel_case(self):
- self.assertEqual(xform_name('UpperCamelCase'), 'upper_camel_case')
- self.assertEqual(xform_name('UpperCamelCase', '-'), 'upper-camel-case')
+ assert xform_name('UpperCamelCase') == 'upper_camel_case'
+ assert xform_name('UpperCamelCase', '-') == 'upper-camel-case'
def test_lower_camel_case(self):
- self.assertEqual(xform_name('lowerCamelCase'), 'lower_camel_case')
- self.assertEqual(xform_name('lowerCamelCase', '-'), 'lower-camel-case')
+ assert xform_name('lowerCamelCase') == 'lower_camel_case'
+ assert xform_name('lowerCamelCase', '-') == 'lower-camel-case'
def test_consecutive_upper_case(self):
- self.assertEqual(xform_name('HTTPHeaders'), 'http_headers')
- self.assertEqual(xform_name('HTTPHeaders', '-'), 'http-headers')
+ assert xform_name('HTTPHeaders') == 'http_headers'
+ assert xform_name('HTTPHeaders', '-') == 'http-headers'
def test_consecutive_upper_case_middle_string(self):
- self.assertEqual(xform_name('MainHTTPHeaders'), 'main_http_headers')
- self.assertEqual(xform_name('MainHTTPHeaders', '-'),
- 'main-http-headers')
+ assert xform_name('MainHTTPHeaders') == 'main_http_headers'
+ assert xform_name('MainHTTPHeaders', '-') == 'main-http-headers'
def test_s3_prefix(self):
- self.assertEqual(xform_name('S3BucketName'), 's3_bucket_name')
+ assert xform_name('S3BucketName') == 's3_bucket_name'
def test_already_snake_cased(self):
- self.assertEqual(xform_name('leave_alone'), 'leave_alone')
- self.assertEqual(xform_name('s3_bucket_name'), 's3_bucket_name')
- self.assertEqual(xform_name('bucket_s3_name'), 'bucket_s3_name')
+ assert xform_name('leave_alone') == 'leave_alone'
+ assert xform_name('s3_bucket_name') == 's3_bucket_name'
+ assert xform_name('bucket_s3_name') == 'bucket_s3_name'
def test_special_cases(self):
# Some patterns don't actually match the rules we expect.
- self.assertEqual(xform_name('SwapEnvironmentCNAMEs'),
- 'swap_environment_cnames')
- self.assertEqual(xform_name('SwapEnvironmentCNAMEs', '-'),
- 'swap-environment-cnames')
- self.assertEqual(xform_name('CreateCachediSCSIVolume', '-'),
- 'create-cached-iscsi-volume')
- self.assertEqual(xform_name('DescribeCachediSCSIVolumes', '-'),
- 'describe-cached-iscsi-volumes')
- self.assertEqual(xform_name('DescribeStorediSCSIVolumes', '-'),
- 'describe-stored-iscsi-volumes')
- self.assertEqual(xform_name('CreateStorediSCSIVolume', '-'),
- 'create-stored-iscsi-volume')
+ assert xform_name('SwapEnvironmentCNAMEs') == 'swap_environment_cnames'
+ assert xform_name('SwapEnvironmentCNAMEs', '-') == 'swap-environment-cnames'
+ assert xform_name('CreateCachediSCSIVolume', '-') == 'create-cached-iscsi-volume'
+ assert xform_name('DescribeCachediSCSIVolumes', '-') == 'describe-cached-iscsi-volumes'
+ assert xform_name('DescribeStorediSCSIVolumes', '-') == 'describe-stored-iscsi-volumes'
+ assert xform_name('CreateStorediSCSIVolume', '-') == 'create-stored-iscsi-volume'
def test_special_case_ends_with_s(self):
- self.assertEqual(xform_name('GatewayARNs', '-'), 'gateway-arns')
+ assert xform_name('GatewayARNs', '-') == 'gateway-arns'
def test_partial_rename(self):
transformed = xform_name('IPV6', '-')
- self.assertEqual(transformed, 'ipv6')
+ assert transformed == 'ipv6'
transformed = xform_name('IPV6', '_')
- self.assertEqual(transformed, 'ipv6')
+ assert transformed == 'ipv6'
def test_s3_partial_rename(self):
transformed = xform_name('s3Resources', '-')
- self.assertEqual(transformed, 's3-resources')
+ assert transformed == 's3-resources'
transformed = xform_name('s3Resources', '_')
- self.assertEqual(transformed, 's3_resources')
+ assert transformed == 's3_resources'
class TestValidateJMESPathForSet(unittest.TestCase):
@@ -234,16 +227,16 @@ class TestValidateJMESPathForSet(unittest.TestCase):
}
def test_invalid_exp(self):
- with self.assertRaises(InvalidExpressionError):
+ with pytest.raises(InvalidExpressionError):
validate_jmespath_for_set('Response.*.Name')
- with self.assertRaises(InvalidExpressionError):
+ with pytest.raises(InvalidExpressionError):
validate_jmespath_for_set('Response.Things[0]')
- with self.assertRaises(InvalidExpressionError):
+ with pytest.raises(InvalidExpressionError):
validate_jmespath_for_set('')
- with self.assertRaises(InvalidExpressionError):
+ with pytest.raises(InvalidExpressionError):
validate_jmespath_for_set('.')
@@ -262,86 +255,81 @@ class TestSetValueFromJMESPath(unittest.TestCase):
def test_single_depth_existing(self):
set_value_from_jmespath(self.data, 'Marker', 'new-token')
- self.assertEqual(self.data['Marker'], 'new-token')
+ assert self.data['Marker'] == 'new-token'
def test_single_depth_new(self):
- self.assertFalse('Limit' in self.data)
+ assert 'Limit' not in self.data
set_value_from_jmespath(self.data, 'Limit', 100)
- self.assertEqual(self.data['Limit'], 100)
+ assert self.data['Limit'] == 100
def test_multiple_depth_existing(self):
set_value_from_jmespath(self.data, 'Response.Thing.Name', 'New Name')
- self.assertEqual(self.data['Response']['Thing']['Name'], 'New Name')
+ assert self.data['Response']['Thing']['Name'] == 'New Name'
def test_multiple_depth_new(self):
- self.assertFalse('Brand' in self.data)
+ assert 'Brand' not in self.data
set_value_from_jmespath(self.data, 'Brand.New', {'abc': 123})
- self.assertEqual(self.data['Brand']['New']['abc'], 123)
+ assert self.data['Brand']['New']['abc'] == 123
class TestParseEC2CredentialsFile(unittest.TestCase):
def test_parse_ec2_content(self):
contents = "AWSAccessKeyId=a\nAWSSecretKey=b\n"
- self.assertEqual(parse_key_val_file_contents(contents),
- {'AWSAccessKeyId': 'a',
- 'AWSSecretKey': 'b'})
+ assert parse_key_val_file_contents(contents) == {
+ 'AWSAccessKeyId': 'a',
+ 'AWSSecretKey': 'b'}
def test_parse_ec2_content_empty(self):
contents = ""
- self.assertEqual(parse_key_val_file_contents(contents), {})
+ assert parse_key_val_file_contents(contents) == {}
def test_key_val_pair_with_blank_lines(self):
# The \n\n has an extra blank between the access/secret keys.
contents = "AWSAccessKeyId=a\n\nAWSSecretKey=b\n"
- self.assertEqual(parse_key_val_file_contents(contents),
- {'AWSAccessKeyId': 'a',
- 'AWSSecretKey': 'b'})
+ assert parse_key_val_file_contents(contents) == {
+ 'AWSAccessKeyId': 'a',
+ 'AWSSecretKey': 'b'}
def test_key_val_parser_lenient(self):
# Ignore any line that does not have a '=' char in it.
contents = "AWSAccessKeyId=a\nNOTKEYVALLINE\nAWSSecretKey=b\n"
- self.assertEqual(parse_key_val_file_contents(contents),
- {'AWSAccessKeyId': 'a',
- 'AWSSecretKey': 'b'})
+ assert parse_key_val_file_contents(contents) == {
+ 'AWSAccessKeyId': 'a',
+ 'AWSSecretKey': 'b'}
def test_multiple_equals_on_line(self):
contents = "AWSAccessKeyId=a\nAWSSecretKey=secret_key_with_equals=b\n"
- self.assertEqual(parse_key_val_file_contents(contents),
- {'AWSAccessKeyId': 'a',
- 'AWSSecretKey': 'secret_key_with_equals=b'})
+ assert parse_key_val_file_contents(contents) == {
+ 'AWSAccessKeyId': 'a',
+ 'AWSSecretKey': 'secret_key_with_equals=b'}
def test_os_error_raises_config_not_found(self):
mock_open = mock.Mock()
mock_open.side_effect = OSError()
- with self.assertRaises(ConfigNotFound):
+ with pytest.raises(ConfigNotFound):
parse_key_val_file('badfile', _open=mock_open)
class TestParseTimestamps(unittest.TestCase):
def test_parse_iso8601(self):
- self.assertEqual(
- parse_timestamp('1970-01-01T00:10:00.000Z'),
- datetime.datetime(1970, 1, 1, 0, 10, tzinfo=tzutc()))
+ assert parse_timestamp('1970-01-01T00:10:00.000Z') == datetime.datetime(
+ 1970, 1, 1, 0, 10, tzinfo=tzutc())
def test_parse_epoch(self):
- self.assertEqual(
- parse_timestamp(1222172800),
- datetime.datetime(2008, 9, 23, 12, 26, 40, tzinfo=tzutc()))
+ assert parse_timestamp(1222172800) == datetime.datetime(
+ 2008, 9, 23, 12, 26, 40, tzinfo=tzutc())
def test_parse_epoch_zero_time(self):
- self.assertEqual(
- parse_timestamp(0),
- datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc()))
+ assert parse_timestamp(0) == datetime.datetime(
+ 1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
def test_parse_epoch_as_string(self):
- self.assertEqual(
- parse_timestamp('1222172800'),
- datetime.datetime(2008, 9, 23, 12, 26, 40, tzinfo=tzutc()))
+ assert parse_timestamp('1222172800') == datetime.datetime(
+ 2008, 9, 23, 12, 26, 40, tzinfo=tzutc())
def test_parse_rfc822(self):
- self.assertEqual(
- parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT'),
- datetime.datetime(2002, 10, 2, 13, 0, tzinfo=tzutc()))
+ assert parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT') == datetime.datetime(
+ 2002, 10, 2, 13, 0, tzinfo=tzutc())
def test_parse_gmt_in_uk_time(self):
# In the UK the time switches from GMT to BST and back as part of
@@ -351,12 +339,11 @@ class TestParseTimestamps(unittest.TestCase):
# instead of GMT. To remedy this issue we can provide a time zone
# context which will enforce GMT == UTC.
with mock.patch('time.tzname', ('GMT', 'BST')):
- self.assertEqual(
- parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT'),
- datetime.datetime(2002, 10, 2, 13, 0, tzinfo=tzutc()))
+ assert parse_timestamp('Wed, 02 Oct 2002 13:00:00 GMT') == datetime.datetime(
+ 2002, 10, 2, 13, 0, tzinfo=tzutc())
def test_parse_invalid_timestamp(self):
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
parse_timestamp('invalid date')
def test_parse_timestamp_fails_with_bad_tzinfo(self):
@@ -366,54 +353,49 @@ class TestParseTimestamps(unittest.TestCase):
mock_get_tzinfo_options = mock.MagicMock(return_value=(mock_tzinfo,))
with mock.patch('botocore.utils.get_tzinfo_options', mock_get_tzinfo_options):
- with self.assertRaises(RuntimeError):
+ with pytest.raises(RuntimeError):
parse_timestamp(0)
class TestDatetime2Timestamp(unittest.TestCase):
def test_datetime2timestamp_naive(self):
- self.assertEqual(
- datetime2timestamp(datetime.datetime(1970, 1, 2)), 86400)
+ assert datetime2timestamp(datetime.datetime(1970, 1, 2)) == 86400
def test_datetime2timestamp_aware(self):
tzinfo = tzoffset("BRST", -10800)
- self.assertEqual(
- datetime2timestamp(datetime.datetime(1970, 1, 2, tzinfo=tzinfo)),
- 97200)
+ assert datetime2timestamp(datetime.datetime(1970, 1, 2, tzinfo=tzinfo)) == 97200
class TestParseToUTCDatetime(unittest.TestCase):
def test_handles_utc_time(self):
original = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(parse_to_aware_datetime(original), original)
+ assert parse_to_aware_datetime(original) == original
def test_handles_other_timezone(self):
tzinfo = tzoffset("BRST", -10800)
original = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=tzinfo)
- self.assertEqual(parse_to_aware_datetime(original), original)
+ assert parse_to_aware_datetime(original) == original
def test_handles_naive_datetime(self):
original = datetime.datetime(1970, 1, 1, 0, 0, 0)
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(parse_to_aware_datetime(original), expected)
+ assert parse_to_aware_datetime(original) == expected
def test_handles_string_epoch(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(parse_to_aware_datetime('0'), expected)
+ assert parse_to_aware_datetime('0') == expected
def test_handles_int_epoch(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(parse_to_aware_datetime(0), expected)
+ assert parse_to_aware_datetime(0) == expected
def test_handles_full_iso_8601(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(
- parse_to_aware_datetime('1970-01-01T00:00:00Z'),
- expected)
+ assert parse_to_aware_datetime('1970-01-01T00:00:00Z') == expected
def test_year_only_iso_8601(self):
expected = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
- self.assertEqual(parse_to_aware_datetime('1970-01-01'), expected)
+ assert parse_to_aware_datetime('1970-01-01') == expected
class TestCachedProperty(unittest.TestCase):
@@ -424,8 +406,8 @@ class TestCachedProperty(unittest.TestCase):
return 'foo'
c = CacheMe()
- self.assertEqual(c.foo, 'foo')
- self.assertEqual(c.foo, 'foo')
+ assert c.foo == 'foo'
+ assert c.foo == 'foo'
def test_cached_property_only_called_once(self):
# Note: you would normally never want to cache
@@ -442,10 +424,10 @@ class TestCachedProperty(unittest.TestCase):
return self.counter
c = NoIncrement()
- self.assertEqual(c.current_value, 1)
+ assert c.current_value == 1
# If the property wasn't cached, the next value should be
# be 2, but because it's cached, we know the value will be 1.
- self.assertEqual(c.current_value, 1)
+ assert c.current_value == 1
class TestArgumentGenerator(unittest.TestCase):
@@ -456,7 +438,7 @@ class TestArgumentGenerator(unittest.TestCase):
shape = DenormalizedStructureBuilder().with_members(
model).build_model()
actual = self.arg_generator.generate_skeleton(shape)
- self.assertEqual(actual, generated_skeleton)
+ assert actual == generated_skeleton
def test_generate_string(self):
self.assert_skeleton_from_model_is(
@@ -477,7 +459,7 @@ class TestArgumentGenerator(unittest.TestCase):
model).build_model()
actual = self.arg_generator.generate_skeleton(shape)
- self.assertIn(actual['A'], enum_values)
+ assert actual['A'] in enum_values
def test_generate_scalars(self):
self.assert_skeleton_from_model_is(
@@ -539,7 +521,7 @@ class TestArgumentGenerator(unittest.TestCase):
actual = self.arg_generator.generate_skeleton(shape)
expected = {'StringList': ['StringType']}
- self.assertEqual(actual, expected)
+ assert actual == expected
def test_generate_nested_structure(self):
self.assert_skeleton_from_model_is(
@@ -623,25 +605,22 @@ class TestArgumentGenerator(unittest.TestCase):
},
'B': ''
}
- self.assertEqual(actual, expected)
+ assert actual == expected
class TestChecksums(unittest.TestCase):
def test_empty_hash(self):
- self.assertEqual(
- calculate_sha256(six.BytesIO(b''), as_hex=True),
+ assert calculate_sha256(six.BytesIO(b''), as_hex=True) == (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_as_hex(self):
- self.assertEqual(
- calculate_sha256(six.BytesIO(b'hello world'), as_hex=True),
+ assert calculate_sha256(six.BytesIO(b'hello world'), as_hex=True) == (
'b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9')
def test_as_binary(self):
- self.assertEqual(
- calculate_sha256(six.BytesIO(b'hello world'), as_hex=False),
- (b"\xb9M'\xb9\x93M>\x08\xa5.R\xd7\xda}\xab\xfa\xc4\x84\xef"
- b"\xe3zS\x80\xee\x90\x88\xf7\xac\xe2\xef\xcd\xe9"))
+ assert calculate_sha256(six.BytesIO(b'hello world'), as_hex=False) == (
+ b"\xb9M'\xb9\x93M>\x08\xa5.R\xd7\xda}\xab\xfa\xc4\x84\xef"
+ b"\xe3zS\x80\xee\x90\x88\xf7\xac\xe2\xef\xcd\xe9")
class TestTreeHash(unittest.TestCase):
@@ -650,71 +629,65 @@ class TestTreeHash(unittest.TestCase):
# SDK implementations.
def test_empty_tree_hash(self):
- self.assertEqual(
- calculate_tree_hash(six.BytesIO(b'')),
+ assert calculate_tree_hash(six.BytesIO(b'')) == (
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855')
def test_tree_hash_less_than_one_mb(self):
one_k = six.BytesIO(b'a' * 1024)
- self.assertEqual(
- calculate_tree_hash(one_k),
+ assert calculate_tree_hash(one_k) == (
'2edc986847e209b4016e141a6dc8716d3207350f416969382d431539bf292e4a')
def test_tree_hash_exactly_one_mb(self):
one_meg_bytestring = b'a' * (1 * 1024 * 1024)
one_meg = six.BytesIO(one_meg_bytestring)
- self.assertEqual(
- calculate_tree_hash(one_meg),
+ assert calculate_tree_hash(one_meg) == (
'9bc1b2a288b26af7257a36277ae3816a7d4f16e89c1e7e77d0a5c48bad62b360')
def test_tree_hash_multiple_of_one_mb(self):
four_mb = six.BytesIO(b'a' * (4 * 1024 * 1024))
- self.assertEqual(
- calculate_tree_hash(four_mb),
+ assert calculate_tree_hash(four_mb) == (
'9491cb2ed1d4e7cd53215f4017c23ec4ad21d7050a1e6bb636c4f67e8cddb844')
def test_tree_hash_offset_of_one_mb_multiple(self):
offset_four_mb = six.BytesIO(b'a' * (4 * 1024 * 1024) + b'a' * 20)
- self.assertEqual(
- calculate_tree_hash(offset_four_mb),
+ assert calculate_tree_hash(offset_four_mb) == (
'12f3cbd6101b981cde074039f6f728071da8879d6f632de8afc7cdf00661b08f')
class TestIsValidEndpointURL(unittest.TestCase):
def test_dns_name_is_valid(self):
- self.assertTrue(is_valid_endpoint_url('https://s3.amazonaws.com/'))
+ assert is_valid_endpoint_url('https://s3.amazonaws.com/')
def test_ip_address_is_allowed(self):
- self.assertTrue(is_valid_endpoint_url('https://10.10.10.10/'))
+ assert is_valid_endpoint_url('https://10.10.10.10/')
def test_path_component_ignored(self):
- self.assertTrue(
- is_valid_endpoint_url('https://foo.bar.com/other/path/'))
+ assert is_valid_endpoint_url('https://foo.bar.com/other/path/')
def test_can_have_port(self):
- self.assertTrue(is_valid_endpoint_url('https://foo.bar.com:12345/'))
+ assert is_valid_endpoint_url('https://foo.bar.com:12345/')
def test_ip_can_have_port(self):
- self.assertTrue(is_valid_endpoint_url('https://10.10.10.10:12345/'))
+ assert is_valid_endpoint_url('https://10.10.10.10:12345/')
def test_cannot_have_spaces(self):
- self.assertFalse(is_valid_endpoint_url('https://my invalid name/'))
+ assert not is_valid_endpoint_url('https://my invalid name/')
def test_missing_scheme(self):
- self.assertFalse(is_valid_endpoint_url('foo.bar.com'))
+ assert not is_valid_endpoint_url('foo.bar.com')
def test_no_new_lines(self):
- self.assertFalse(is_valid_endpoint_url('https://foo.bar.com\nbar/'))
+ assert not is_valid_endpoint_url('https://foo.bar.com\nbar/')
def test_long_hostname(self):
long_hostname = 'htps://%s.com' % ('a' * 256)
- self.assertFalse(is_valid_endpoint_url(long_hostname))
+ assert not is_valid_endpoint_url(long_hostname)
def test_hostname_can_end_with_dot(self):
- self.assertTrue(is_valid_endpoint_url('https://foo.bar.com./'))
+ assert is_valid_endpoint_url('https://foo.bar.com./')
def test_hostname_no_dots(self):
- self.assertTrue(is_valid_endpoint_url('https://foo/'))
+ assert is_valid_endpoint_url('https://foo/')
class TestFixS3Host(unittest.TestCase):
@@ -728,9 +701,9 @@ class TestFixS3Host(unittest.TestCase):
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.s3-us-west-2.amazonaws.com/key.txt')
- self.assertEqual(request.auth_path, '/bucket/key.txt')
+ assert request.auth_path == '/bucket/key.txt'
def test_fix_s3_host_only_applied_once(self):
request = AWSRequest(
@@ -746,12 +719,12 @@ class TestFixS3Host(unittest.TestCase):
fix_s3_host(
request=request, signature_version=signature_version,
region_name=region_name)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.s3.us-west-2.amazonaws.com/key.txt')
# This was a bug previously. We want to make sure that
# calling fix_s3_host() again does not alter the auth_path.
# Otherwise we'll get signature errors.
- self.assertEqual(request.auth_path, '/bucket/key.txt')
+ assert request.auth_path == '/bucket/key.txt'
def test_dns_style_not_used_for_get_bucket_location(self):
original_url = 'https://s3-us-west-2.amazonaws.com/bucket?location'
@@ -766,7 +739,7 @@ class TestFixS3Host(unittest.TestCase):
region_name=region_name)
# The request url should not have been modified because this is
# a request for GetBucketLocation.
- self.assertEqual(request.url, original_url)
+ assert request.url == original_url
def test_can_provide_default_endpoint_url(self):
request = AWSRequest(
@@ -779,7 +752,7 @@ class TestFixS3Host(unittest.TestCase):
request=request, signature_version=signature_version,
region_name=region_name,
default_endpoint_url='foo.s3.amazonaws.com')
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.foo.s3.amazonaws.com/key.txt')
def test_no_endpoint_url_uses_request_url(self):
@@ -795,7 +768,7 @@ class TestFixS3Host(unittest.TestCase):
# A value of None means use the url in the current request.
default_endpoint_url=None,
)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.s3-us-west-2.amazonaws.com/key.txt')
@@ -810,9 +783,9 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.foo.amazonaws.com/key.txt')
- self.assertEqual(request.auth_path, '/bucket/key.txt')
+ assert request.auth_path == '/bucket/key.txt'
def test_uses_default_endpoint(self):
request = AWSRequest(
@@ -824,9 +797,9 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name, default_endpoint_url='s3.amazonaws.com')
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.s3.amazonaws.com/key.txt')
- self.assertEqual(request.auth_path, '/bucket/key.txt')
+ assert request.auth_path == '/bucket/key.txt'
def test_throws_invalid_dns_name_error(self):
request = AWSRequest(
@@ -835,7 +808,7 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
)
region_name = 'us-west-2'
signature_version = 's3'
- with self.assertRaises(InvalidDNSNameError):
+ with pytest.raises(InvalidDNSNameError):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
@@ -854,12 +827,12 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.foo.amazonaws.com/key.txt')
# This was a bug previously. We want to make sure that
# calling fix_s3_host() again does not alter the auth_path.
# Otherwise we'll get signature errors.
- self.assertEqual(request.auth_path, '/bucket/key.txt')
+ assert request.auth_path == '/bucket/key.txt'
def test_virtual_host_style_for_make_bucket(self):
request = AWSRequest(
@@ -871,7 +844,7 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name)
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.foo.amazonaws.com/')
def test_virtual_host_style_not_used_for_get_bucket_location(self):
@@ -887,7 +860,7 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
region_name=region_name)
# The request url should not have been modified because this is
# a request for GetBucketLocation.
- self.assertEqual(request.url, original_url)
+ assert request.url == original_url
def test_virtual_host_style_not_used_for_list_buckets(self):
original_url = 'https://foo.amazonaws.com/'
@@ -902,7 +875,7 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
region_name=region_name)
# The request url should not have been modified because this is
# a request for GetBucketLocation.
- self.assertEqual(request.url, original_url)
+ assert request.url == original_url
def test_is_unaffected_by_sigv4(self):
request = AWSRequest(
@@ -914,7 +887,7 @@ class TestSwitchToVirtualHostStyle(unittest.TestCase):
switch_to_virtual_host_style(
request=request, signature_version=signature_version,
region_name=region_name, default_endpoint_url='s3.amazonaws.com')
- self.assertEqual(request.url,
+ assert request.url == (
'https://bucket.s3.amazonaws.com/key.txt')
@@ -936,13 +909,13 @@ class TestInstanceCache(unittest.TestCase):
def test_cache_single_method_call(self):
adder = self.DummyClass(self.cache)
- self.assertEqual(adder.add(2, 1), 3)
+ assert adder.add(2, 1) == 3
# This should result in one entry in the cache.
- self.assertEqual(len(self.cache), 1)
+ assert len(self.cache) == 1
# When we call the method with the same args,
# we should reuse the same entry in the cache.
- self.assertEqual(adder.add(2, 1), 3)
- self.assertEqual(len(self.cache), 1)
+ assert adder.add(2, 1) == 3
+ assert len(self.cache) == 1
def test_can_cache_multiple_methods(self):
adder = self.DummyClass(self.cache)
@@ -950,15 +923,15 @@ class TestInstanceCache(unittest.TestCase):
# A different method results in a new cache entry,
# so now there should be two elements in the cache.
- self.assertEqual(adder.sub(2, 1), 1)
- self.assertEqual(len(self.cache), 2)
- self.assertEqual(adder.sub(2, 1), 1)
+ assert adder.sub(2, 1) == 1
+ assert len(self.cache) == 2
+ assert adder.sub(2, 1) == 1
def test_can_cache_kwargs(self):
adder = self.DummyClass(self.cache)
adder.add(x=2, y=1)
- self.assertEqual(adder.add(x=2, y=1), 3)
- self.assertEqual(len(self.cache), 1)
+ assert adder.add(x=2, y=1) == 3
+ assert len(self.cache) == 1
class TestMergeDicts(unittest.TestCase):
@@ -969,9 +942,9 @@ class TestMergeDicts(unittest.TestCase):
merge_dicts(first, second)
# The value from the second dict wins.
- self.assertEqual(first['foo']['bar']['baz']['one'], 'UPDATE')
+ assert first['foo']['bar']['baz']['one'] == 'UPDATE'
# And we still preserve the other attributes.
- self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
+ assert first['foo']['bar']['baz']['two'] == 'ORIGINAL'
def test_merge_dicts_new_keys(self):
first = {
@@ -979,14 +952,14 @@ class TestMergeDicts(unittest.TestCase):
second = {'foo': {'bar': {'baz': {'three': 'UPDATE'}}}}
merge_dicts(first, second)
- self.assertEqual(first['foo']['bar']['baz']['one'], 'ORIGINAL')
- self.assertEqual(first['foo']['bar']['baz']['two'], 'ORIGINAL')
- self.assertEqual(first['foo']['bar']['baz']['three'], 'UPDATE')
+ assert first['foo']['bar']['baz']['one'] == 'ORIGINAL'
+ assert first['foo']['bar']['baz']['two'] == 'ORIGINAL'
+ assert first['foo']['bar']['baz']['three'] == 'UPDATE'
def test_merge_empty_dict_does_nothing(self):
first = {'foo': {'bar': 'baz'}}
merge_dicts(first, {})
- self.assertEqual(first, {'foo': {'bar': 'baz'}})
+ assert first == {'foo': {'bar': 'baz'}}
def test_more_than_one_sub_dict(self):
first = {'one': {'inner': 'ORIGINAL', 'inner2': 'ORIGINAL'},
@@ -994,11 +967,11 @@ class TestMergeDicts(unittest.TestCase):
second = {'one': {'inner': 'UPDATE'}, 'two': {'inner': 'UPDATE'}}
merge_dicts(first, second)
- self.assertEqual(first['one']['inner'], 'UPDATE')
- self.assertEqual(first['one']['inner2'], 'ORIGINAL')
+ assert first['one']['inner'] == 'UPDATE'
+ assert first['one']['inner2'] == 'ORIGINAL'
- self.assertEqual(first['two']['inner'], 'UPDATE')
- self.assertEqual(first['two']['inner2'], 'ORIGINAL')
+ assert first['two']['inner'] == 'UPDATE'
+ assert first['two']['inner2'] == 'ORIGINAL'
def test_new_keys(self):
first = {'one': {'inner': 'ORIGINAL'}, 'two': {'inner': 'ORIGINAL'}}
@@ -1006,42 +979,38 @@ class TestMergeDicts(unittest.TestCase):
# In this case, second has no keys in common, but we'd still expect
# this to get merged.
merge_dicts(first, second)
- self.assertEqual(first['three']['foo']['bar'], 'baz')
+ assert first['three']['foo']['bar'] == 'baz'
def test_list_values_no_append(self):
dict1 = {'Foo': ['old_foo_value']}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2)
- self.assertEqual(
- dict1, {'Foo': ['new_foo_value']})
+ assert dict1 == {'Foo': ['new_foo_value']}
def test_list_values_append(self):
dict1 = {'Foo': ['old_foo_value']}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
- self.assertEqual(
- dict1, {'Foo': ['old_foo_value', 'new_foo_value']})
+ assert dict1 == {'Foo': ['old_foo_value', 'new_foo_value']}
def test_list_values_mismatching_types(self):
dict1 = {'Foo': 'old_foo_value'}
dict2 = {'Foo': ['new_foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
- self.assertEqual(
- dict1, {'Foo': ['new_foo_value']})
+ assert dict1 == {'Foo': ['new_foo_value']}
def test_list_values_missing_key(self):
dict1 = {}
dict2 = {'Foo': ['foo_value']}
merge_dicts(dict1, dict2, append_lists=True)
- self.assertEqual(
- dict1, {'Foo': ['foo_value']})
+ assert dict1 == {'Foo': ['foo_value']}
class TestLowercaseDict(unittest.TestCase):
def test_lowercase_dict_empty(self):
original = {}
copy = lowercase_dict(original)
- self.assertEqual(original, copy)
+ assert original == copy
def test_lowercase_dict_original_keys_lower(self):
original = {
@@ -1049,7 +1018,7 @@ class TestLowercaseDict(unittest.TestCase):
'lower_key2': 2,
}
copy = lowercase_dict(original)
- self.assertEqual(original, copy)
+ assert original == copy
def test_lowercase_dict_original_keys_mixed(self):
original = {
@@ -1061,7 +1030,7 @@ class TestLowercaseDict(unittest.TestCase):
'some_key': 'value',
'another_one': 'anothervalue',
}
- self.assertEqual(expected, copy)
+ assert expected == copy
class TestGetServiceModuleName(unittest.TestCase):
@@ -1081,96 +1050,73 @@ class TestGetServiceModuleName(unittest.TestCase):
self.service_description, 'myservice')
def test_default(self):
- self.assertEqual(
- get_service_module_name(self.service_model),
- 'MyService'
- )
+ assert get_service_module_name(self.service_model) == 'MyService'
def test_client_name_with_amazon(self):
self.service_description['metadata']['serviceFullName'] = (
'Amazon MyService')
- self.assertEqual(
- get_service_module_name(self.service_model),
- 'MyService'
- )
+ assert get_service_module_name(self.service_model) == 'MyService'
def test_client_name_using_abreviation(self):
self.service_description['metadata']['serviceAbbreviation'] = (
'Abbreviation')
- self.assertEqual(
- get_service_module_name(self.service_model),
- 'Abbreviation'
- )
+ assert get_service_module_name(self.service_model) == 'Abbreviation'
def test_client_name_with_non_alphabet_characters(self):
self.service_description['metadata']['serviceFullName'] = (
'Amazon My-Service')
- self.assertEqual(
- get_service_module_name(self.service_model),
- 'MyService'
- )
+ assert get_service_module_name(self.service_model) == 'MyService'
def test_client_name_with_no_full_name_or_abbreviation(self):
del self.service_description['metadata']['serviceFullName']
- self.assertEqual(
- get_service_module_name(self.service_model),
- 'myservice'
- )
+ assert get_service_module_name(self.service_model) == 'myservice'
class TestPercentEncodeSequence(unittest.TestCase):
def test_percent_encode_empty(self):
- self.assertEqual(percent_encode_sequence({}), '')
+ assert percent_encode_sequence({}) == ''
def test_percent_encode_special_chars(self):
- self.assertEqual(
- percent_encode_sequence({'k1': 'with spaces++/'}),
- 'k1=with%20spaces%2B%2B%2F')
+ assert percent_encode_sequence({'k1': 'with spaces++/'}) == 'k1=with%20spaces%2B%2B%2F'
def test_percent_encode_string_string_tuples(self):
- self.assertEqual(percent_encode_sequence([('k1', 'v1'), ('k2', 'v2')]),
- 'k1=v1&k2=v2')
+ assert percent_encode_sequence([('k1', 'v1'), ('k2', 'v2')]) == 'k1=v1&k2=v2'
def test_percent_encode_dict_single_pair(self):
- self.assertEqual(percent_encode_sequence({'k1': 'v1'}), 'k1=v1')
+ assert percent_encode_sequence({'k1': 'v1'}) == 'k1=v1'
def test_percent_encode_dict_string_string(self):
- self.assertEqual(
- percent_encode_sequence(OrderedDict([('k1', 'v1'), ('k2', 'v2')])),
- 'k1=v1&k2=v2')
+ assert percent_encode_sequence(OrderedDict([('k1', 'v1'), ('k2', 'v2')])) == 'k1=v1&k2=v2'
def test_percent_encode_single_list_of_values(self):
- self.assertEqual(percent_encode_sequence({'k1': ['a', 'b', 'c']}),
- 'k1=a&k1=b&k1=c')
+ assert percent_encode_sequence({'k1': ['a', 'b', 'c']}) == 'k1=a&k1=b&k1=c'
def test_percent_encode_list_values_of_string(self):
- self.assertEqual(
- percent_encode_sequence(
+ assert percent_encode_sequence(
OrderedDict([('k1', ['a', 'list']),
- ('k2', ['another', 'list'])])),
- 'k1=a&k1=list&k2=another&k2=list')
+ ('k2', ['another', 'list'])])) == 'k1=a&k1=list&k2=another&k2=list'
class TestPercentEncode(unittest.TestCase):
def test_percent_encode_obj(self):
- self.assertEqual(percent_encode(1), '1')
+ assert percent_encode(1) == '1'
def test_percent_encode_text(self):
- self.assertEqual(percent_encode(u''), '')
- self.assertEqual(percent_encode(u'a'), 'a')
- self.assertEqual(percent_encode(u'\u0000'), '%00')
+ assert percent_encode(u'') == ''
+ assert percent_encode(u'a') == 'a'
+ assert percent_encode(u'\u0000') == '%00'
# Codepoint > 0x7f
- self.assertEqual(percent_encode(u'\u2603'), '%E2%98%83')
+ assert percent_encode(u'\u2603') == '%E2%98%83'
# Codepoint > 0xffff
- self.assertEqual(percent_encode(u'\U0001f32e'), '%F0%9F%8C%AE')
+ assert percent_encode(u'\U0001f32e') == '%F0%9F%8C%AE'
def test_percent_encode_bytes(self):
- self.assertEqual(percent_encode(b''), '')
- self.assertEqual(percent_encode(b'a'), u'a')
- self.assertEqual(percent_encode(b'\x00'), u'%00')
+ assert percent_encode(b'') == ''
+ assert percent_encode(b'a') == u'a'
+ assert percent_encode(b'\x00') == u'%00'
# UTF-8 Snowman
- self.assertEqual(percent_encode(b'\xe2\x98\x83'), '%E2%98%83')
+ assert percent_encode(b'\xe2\x98\x83') == '%E2%98%83'
# Arbitrary bytes (not valid UTF-8).
- self.assertEqual(percent_encode(b'\x80\x00'), '%80%00')
+ assert percent_encode(b'\x80\x00') == '%80%00'
class TestSwitchHostS3Accelerate(unittest.TestCase):
def setUp(self):
@@ -1184,8 +1130,7 @@ class TestSwitchHostS3Accelerate(unittest.TestCase):
def test_switch_host(self):
switch_host_s3_accelerate(self.request, 'PutObject')
- self.assertEqual(
- self.request.url,
+ assert self.request.url == (
'https://s3-accelerate.amazonaws.com/foo/key.txt')
def test_do_not_switch_black_listed_operations(self):
@@ -1198,14 +1143,12 @@ class TestSwitchHostS3Accelerate(unittest.TestCase):
]
for op_name in blacklist_ops:
switch_host_s3_accelerate(self.request, op_name)
- self.assertEqual(self.request.url, self.original_url)
+ assert self.request.url == self.original_url
def test_uses_original_endpoint_scheme(self):
self.request.url = 'http://s3.amazonaws.com/foo/key.txt'
switch_host_s3_accelerate(self.request, 'PutObject')
- self.assertEqual(
- self.request.url,
- 'http://s3-accelerate.amazonaws.com/foo/key.txt')
+ assert self.request.url == 'http://s3-accelerate.amazonaws.com/foo/key.txt'
def test_uses_dualstack(self):
self.client_config.s3 = {'use_dualstack_endpoint': True}
@@ -1216,9 +1159,7 @@ class TestSwitchHostS3Accelerate(unittest.TestCase):
)
self.request.context['client_config'] = self.client_config
switch_host_s3_accelerate(self.request, 'PutObject')
- self.assertEqual(
- self.request.url,
- 'https://s3-accelerate.dualstack.amazonaws.com/foo/key.txt')
+ assert self.request.url == 'https://s3-accelerate.dualstack.amazonaws.com/foo/key.txt'
class TestDeepMerge(unittest.TestCase):
@@ -1228,7 +1169,7 @@ class TestDeepMerge(unittest.TestCase):
deep_merge(a, b)
expected = {'key': 'value', 'otherkey': 'othervalue'}
- self.assertEqual(a, expected)
+ assert a == expected
def test_merge_list(self):
# Lists are treated as opaque data and so no effort should be made to
@@ -1236,49 +1177,49 @@ class TestDeepMerge(unittest.TestCase):
a = {'key': ['original']}
b = {'key': ['new']}
deep_merge(a, b)
- self.assertEqual(a, {'key': ['new']})
+ assert a == {'key': ['new']}
def test_merge_number(self):
# The value from b is always taken
a = {'key': 10}
b = {'key': 45}
deep_merge(a, b)
- self.assertEqual(a, {'key': 45})
+ assert a == {'key': 45}
a = {'key': 45}
b = {'key': 10}
deep_merge(a, b)
- self.assertEqual(a, {'key': 10})
+ assert a == {'key': 10}
def test_merge_boolean(self):
# The value from b is always taken
a = {'key': False}
b = {'key': True}
deep_merge(a, b)
- self.assertEqual(a, {'key': True})
+ assert a == {'key': True}
a = {'key': True}
b = {'key': False}
deep_merge(a, b)
- self.assertEqual(a, {'key': False})
+ assert a == {'key': False}
def test_merge_string(self):
a = {'key': 'value'}
b = {'key': 'othervalue'}
deep_merge(a, b)
- self.assertEqual(a, {'key': 'othervalue'})
+ assert a == {'key': 'othervalue'}
def test_merge_overrides_value(self):
# The value from b is always taken, even when it's a different type
a = {'key': 'original'}
b = {'key': {'newkey': 'newvalue'}}
deep_merge(a, b)
- self.assertEqual(a, {'key': {'newkey': 'newvalue'}})
+ assert a == {'key': {'newkey': 'newvalue'}}
a = {'key': {'anotherkey': 'value'}}
b = {'key': 'newvalue'}
deep_merge(a, b)
- self.assertEqual(a, {'key': 'newvalue'})
+ assert a == {'key': 'newvalue'}
def test_deep_merge(self):
a = {
@@ -1310,7 +1251,7 @@ class TestDeepMerge(unittest.TestCase):
'key': 'value'
}
}
- self.assertEqual(a, expected)
+ assert a == expected
class TestS3RegionRedirector(unittest.TestCase):
@@ -1350,15 +1291,13 @@ class TestS3RegionRedirector(unittest.TestCase):
'endpoint': 'https://eu-central-1.amazonaws.com'
}}
self.redirector.set_request_url(params, context)
- self.assertEqual(
- params['url'], 'https://eu-central-1.amazonaws.com/foo')
+ assert params['url'] == 'https://eu-central-1.amazonaws.com/foo'
def test_only_changes_request_url_if_endpoint_present(self):
params = {'url': 'https://us-west-2.amazonaws.com/foo'}
context = {}
self.redirector.set_request_url(params, context)
- self.assertEqual(
- params['url'], 'https://us-west-2.amazonaws.com/foo')
+ assert params['url'] == 'https://us-west-2.amazonaws.com/foo'
def test_set_request_url_keeps_old_scheme(self):
params = {'url': 'http://us-west-2.amazonaws.com/foo'}
@@ -1366,8 +1305,7 @@ class TestS3RegionRedirector(unittest.TestCase):
'endpoint': 'https://eu-central-1.amazonaws.com'
}}
self.redirector.set_request_url(params, context)
- self.assertEqual(
- params['url'], 'http://eu-central-1.amazonaws.com/foo')
+ assert params['url'] == 'http://eu-central-1.amazonaws.com/foo'
def test_sets_signing_context_from_cache(self):
signing_context = {'endpoint': 'bar'}
@@ -1377,7 +1315,7 @@ class TestS3RegionRedirector(unittest.TestCase):
params = {'Bucket': 'foo'}
context = {}
self.redirector.redirect_from_cache(params, context)
- self.assertEqual(context.get('signing'), signing_context)
+ assert context.get('signing') == signing_context
def test_only_changes_context_if_bucket_in_cache(self):
signing_context = {'endpoint': 'bar'}
@@ -1387,7 +1325,7 @@ class TestS3RegionRedirector(unittest.TestCase):
params = {'Bucket': 'foo'}
context = {}
self.redirector.redirect_from_cache(params, context)
- self.assertNotEqual(context.get('signing'), signing_context)
+ assert context.get('signing') != signing_context
def test_redirect_from_error(self):
request_dict = {
@@ -1409,10 +1347,9 @@ class TestS3RegionRedirector(unittest.TestCase):
request_dict, response, self.operation)
# The response needs to be 0 so that there is no retry delay
- self.assertEqual(redirect_response, 0)
+ assert redirect_response == 0
- self.assertEqual(
- request_dict['url'], 'https://eu-central-1.amazonaws.com/foo')
+ assert request_dict['url'] == 'https://eu-central-1.amazonaws.com/foo'
expected_signing_context = {
'endpoint': 'https://eu-central-1.amazonaws.com',
@@ -1420,8 +1357,8 @@ class TestS3RegionRedirector(unittest.TestCase):
'region': 'eu-central-1'
}
signing_context = request_dict['context'].get('signing')
- self.assertEqual(signing_context, expected_signing_context)
- self.assertTrue(request_dict['context'].get('s3_redirected'))
+ assert signing_context == expected_signing_context
+ assert request_dict['context'].get('s3_redirected')
def test_does_not_redirect_if_previously_redirected(self):
request_dict = {
@@ -1442,15 +1379,15 @@ class TestS3RegionRedirector(unittest.TestCase):
})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
def test_does_not_redirect_unless_permanentredirect_recieved(self):
request_dict = {}
response = (None, {})
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
- self.assertEqual(request_dict, {})
+ assert redirect_response is None
+ assert request_dict == {}
def test_does_not_redirect_if_region_cannot_be_found(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
@@ -1469,7 +1406,7 @@ class TestS3RegionRedirector(unittest.TestCase):
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
def test_redirects_301(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
@@ -1487,12 +1424,12 @@ class TestS3RegionRedirector(unittest.TestCase):
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertEqual(redirect_response, 0)
+ assert redirect_response == 0
self.operation.name = 'ListObjects'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
def test_redirects_400_head_bucket(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
@@ -1507,12 +1444,12 @@ class TestS3RegionRedirector(unittest.TestCase):
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertEqual(redirect_response, 0)
+ assert redirect_response == 0
self.operation.name = 'ListObjects'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
def test_does_not_redirect_400_head_bucket_no_region_header(self):
# We should not redirect a 400 Head* if the region header is not
@@ -1530,9 +1467,9 @@ class TestS3RegionRedirector(unittest.TestCase):
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
head_bucket_calls = self.client.head_bucket.call_count
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
# We should not have made an additional head bucket call
- self.assertEqual(head_bucket_calls, 0)
+ assert head_bucket_calls == 0
def test_does_not_redirect_if_None_response(self):
request_dict = {'url': 'https://us-west-2.amazonaws.com/foo',
@@ -1540,7 +1477,7 @@ class TestS3RegionRedirector(unittest.TestCase):
response = None
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertIsNone(redirect_response)
+ assert redirect_response is None
def test_get_region_from_response(self):
response = (None, {
@@ -1554,7 +1491,7 @@ class TestS3RegionRedirector(unittest.TestCase):
}
})
region = self.redirector.get_bucket_region('foo', response)
- self.assertEqual(region, 'eu-central-1')
+ assert region == 'eu-central-1'
def test_get_region_from_response_error_body(self):
response = (None, {
@@ -1569,7 +1506,7 @@ class TestS3RegionRedirector(unittest.TestCase):
}
})
region = self.redirector.get_bucket_region('foo', response)
- self.assertEqual(region, 'eu-central-1')
+ assert region == 'eu-central-1'
def test_get_region_from_head_bucket_error(self):
self.set_client_response_headers(
@@ -1585,7 +1522,7 @@ class TestS3RegionRedirector(unittest.TestCase):
}
})
region = self.redirector.get_bucket_region('foo', response)
- self.assertEqual(region, 'eu-central-1')
+ assert region == 'eu-central-1'
def test_get_region_from_head_bucket_success(self):
success_response = {
@@ -1606,7 +1543,7 @@ class TestS3RegionRedirector(unittest.TestCase):
}
})
region = self.redirector.get_bucket_region('foo', response)
- self.assertEqual(region, 'eu-central-1')
+ assert region == 'eu-central-1'
def test_no_redirect_from_error_for_accesspoint(self):
request_dict = {
@@ -1628,7 +1565,7 @@ class TestS3RegionRedirector(unittest.TestCase):
self.operation.name = 'HeadObject'
redirect_response = self.redirector.redirect_from_error(
request_dict, response, self.operation)
- self.assertEqual(redirect_response, None)
+ assert redirect_response is None
def test_no_redirect_from_cache_for_accesspoint(self):
self.cache['foo'] = {'endpoint': 'foo-endpoint'}
@@ -1637,7 +1574,7 @@ class TestS3RegionRedirector(unittest.TestCase):
params = {'Bucket': 'foo'}
context = {'s3_accesspoint': {}}
self.redirector.redirect_from_cache(params, context)
- self.assertNotIn('signing', context)
+ assert 'signing' not in context
class TestArnParser(unittest.TestCase):
@@ -1646,46 +1583,37 @@ class TestArnParser(unittest.TestCase):
def test_parse(self):
arn = 'arn:aws:s3:us-west-2:1023456789012:myresource'
- self.assertEqual(
- self.parser.parse_arn(arn),
- {
+ assert self.parser.parse_arn(arn) == {
'partition': 'aws',
'service': 's3',
'region': 'us-west-2',
'account': '1023456789012',
'resource': 'myresource',
}
- )
def test_parse_invalid_arn(self):
- with self.assertRaises(InvalidArnException):
+ with pytest.raises(InvalidArnException):
self.parser.parse_arn('arn:aws:s3')
def test_parse_arn_with_resource_type(self):
arn = 'arn:aws:s3:us-west-2:1023456789012:bucket_name:mybucket'
- self.assertEqual(
- self.parser.parse_arn(arn),
- {
+ assert self.parser.parse_arn(arn) == {
'partition': 'aws',
'service': 's3',
'region': 'us-west-2',
'account': '1023456789012',
'resource': 'bucket_name:mybucket',
}
- )
def test_parse_arn_with_empty_elements(self):
arn = 'arn:aws:s3:::mybucket'
- self.assertEqual(
- self.parser.parse_arn(arn),
- {
+ assert self.parser.parse_arn(arn) == {
'partition': 'aws',
'service': 's3',
'region': '',
'account': '',
'resource': 'mybucket',
}
- )
class TestS3ArnParamHandler(unittest.TestCase):
@@ -1706,10 +1634,8 @@ class TestS3ArnParamHandler(unittest.TestCase):
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
- self.assertEqual(params, {'Bucket': 'endpoint'})
- self.assertEqual(
- context,
- {
+ assert params == {'Bucket': 'endpoint'}
+ assert context == {
's3_accesspoint': {
'name': 'endpoint',
'account': '123456789012',
@@ -1718,7 +1644,6 @@ class TestS3ArnParamHandler(unittest.TestCase):
'service': 's3',
}
}
- )
def test_accesspoint_arn_with_colon(self):
params = {
@@ -1726,10 +1651,8 @@ class TestS3ArnParamHandler(unittest.TestCase):
}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
- self.assertEqual(params, {'Bucket': 'endpoint'})
- self.assertEqual(
- context,
- {
+ assert params == {'Bucket': 'endpoint'}
+ assert context == {
's3_accesspoint': {
'name': 'endpoint',
'account': '123456789012',
@@ -1738,14 +1661,13 @@ class TestS3ArnParamHandler(unittest.TestCase):
'service': 's3',
}
}
- )
def test_errors_for_non_accesspoint_arn(self):
params = {
'Bucket': 'arn:aws:s3:us-west-2:123456789012:unsupported:resource'
}
context = {}
- with self.assertRaises(UnsupportedS3ArnError):
+ with pytest.raises(UnsupportedS3ArnError):
self.arn_handler.handle_arn(params, self.model, context)
def test_outpost_arn_with_colon(self):
@@ -1816,8 +1738,8 @@ class TestS3ArnParamHandler(unittest.TestCase):
params = {'Bucket': 'mybucket'}
context = {}
self.arn_handler.handle_arn(params, self.model, context)
- self.assertEqual(params, {'Bucket': 'mybucket'})
- self.assertEqual(context, {})
+ assert params == {'Bucket': 'mybucket'}
+ assert context == {}
def test_ignores_create_bucket(self):
arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint/endpoint'
@@ -1825,8 +1747,8 @@ class TestS3ArnParamHandler(unittest.TestCase):
context = {}
self.model.name = 'CreateBucket'
self.arn_handler.handle_arn(params, self.model, context)
- self.assertEqual(params, {'Bucket': arn})
- self.assertEqual(context, {})
+ assert params == {'Bucket': arn}
+ assert context == {}
class TestS3EndpointSetter(unittest.TestCase):
@@ -1938,7 +1860,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, self.region_name
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_accesspoint_preserves_key_in_path(self):
request = self.get_s3_accesspoint_request(key=self.key)
@@ -1947,7 +1869,7 @@ class TestS3EndpointSetter(unittest.TestCase):
self.accesspoint_name, self.account, self.region_name,
self.key
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_accesspoint_preserves_scheme(self):
request = self.get_s3_accesspoint_request(scheme='http://')
@@ -1955,7 +1877,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'http://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, self.region_name,
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_accesspoint_preserves_query_string(self):
request = self.get_s3_accesspoint_request(querystring='acl')
@@ -1963,7 +1885,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/?acl' % (
self.accesspoint_name, self.account, self.region_name,
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_uses_resolved_dns_suffix(self):
self.endpoint_resolver.construct_endpoint.return_value = {
@@ -1974,7 +1896,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s-%s.s3-accesspoint.%s.mysuffix.com/' % (
self.accesspoint_name, self.account, self.region_name,
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_uses_region_of_client_if_use_arn_disabled(self):
client_region = 'client-region'
@@ -1985,13 +1907,13 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s-%s.s3-accesspoint.%s.amazonaws.com/' % (
self.accesspoint_name, self.account, client_region,
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_accesspoint_errors_for_custom_endpoint(self):
endpoint_setter = self.get_endpoint_setter(
endpoint_url='https://custom.com')
request = self.get_s3_accesspoint_request()
- with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
+ with pytest.raises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_errors_for_mismatching_partition(self):
@@ -1999,7 +1921,7 @@ class TestS3EndpointSetter(unittest.TestCase):
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
- with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
+ with pytest.raises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_errors_for_mismatching_partition_when_using_client_region(self):
@@ -2009,7 +1931,7 @@ class TestS3EndpointSetter(unittest.TestCase):
accesspoint_context = self.get_s3_accesspoint_context(partition='aws')
request = self.get_s3_accesspoint_request(
accesspoint_context=accesspoint_context)
- with self.assertRaises(UnsupportedS3AccesspointConfigurationError):
+ with pytest.raises(UnsupportedS3AccesspointConfigurationError):
self.call_set_endpoint(endpoint_setter, request=request)
def test_set_endpoint_for_auto(self):
@@ -2020,7 +1942,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_set_endpoint_for_virtual(self):
endpoint_setter = self.get_endpoint_setter(
@@ -2030,7 +1952,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s.s3.us-west-2.amazonaws.com/%s' % (
self.bucket, self.key
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_set_endpoint_for_path(self):
endpoint_setter = self.get_endpoint_setter(
@@ -2040,7 +1962,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://s3.us-west-2.amazonaws.com/%s/%s' % (
self.bucket, self.key
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
def test_set_endpoint_for_accelerate(self):
endpoint_setter = self.get_endpoint_setter(
@@ -2050,7 +1972,7 @@ class TestS3EndpointSetter(unittest.TestCase):
expected_url = 'https://%s.s3-accelerate.amazonaws.com/%s' % (
self.bucket, self.key
)
- self.assertEqual(request.url, expected_url)
+ assert request.url == expected_url
class TestContainerMetadataFetcher(unittest.TestCase):
@@ -2085,25 +2007,25 @@ class TestContainerMetadataFetcher(unittest.TestCase):
def assert_request(self, method, url, headers):
request = self.http.send.call_args[0][0]
- self.assertEqual(request.method, method)
- self.assertEqual(request.url, url)
- self.assertEqual(request.headers, headers)
+ assert request.method == method
+ assert request.url == url
+ assert request.headers == headers
def assert_can_retrieve_metadata_from(self, full_uri):
response_body = {'foo': 'bar'}
self.set_http_responses_to(response_body)
fetcher = self.create_fetcher()
response = fetcher.retrieve_full_uri(full_uri)
- self.assertEqual(response, response_body)
+ assert response == response_body
self.assert_request('GET', full_uri, {'Accept': 'application/json'})
def assert_host_is_not_allowed(self, full_uri):
response_body = {'foo': 'bar'}
self.set_http_responses_to(response_body)
fetcher = self.create_fetcher()
- with six.assertRaisesRegex(self, ValueError, 'Unsupported host'):
+ with pytest.raises(ValueError, match='Unsupported host'):
fetcher.retrieve_full_uri(full_uri)
- self.assertFalse(self.http.send.called)
+ assert not self.http.send.called
def test_can_specify_extra_headers_are_merged(self):
headers = {
@@ -2130,7 +2052,7 @@ class TestContainerMetadataFetcher(unittest.TestCase):
fetcher = self.create_fetcher()
response = fetcher.retrieve_uri('/foo?id=1')
- self.assertEqual(response, json_body)
+ assert response == json_body
# Ensure we made calls to the right endpoint.
headers = {'Accept': 'application/json'}
self.assert_request('GET', 'http://169.254.170.2/foo?id=1', headers)
@@ -2152,7 +2074,7 @@ class TestContainerMetadataFetcher(unittest.TestCase):
)
fetcher = self.create_fetcher()
response = fetcher.retrieve_uri('/foo?id=1')
- self.assertEqual(response, success_response)
+ assert response == success_response
def test_propagates_credential_error_on_http_errors(self):
self.set_http_responses_to(
@@ -2165,9 +2087,9 @@ class TestContainerMetadataFetcher(unittest.TestCase):
)
# As a result, we expect an appropriate error to be raised.
fetcher = self.create_fetcher()
- with self.assertRaises(MetadataRetrievalError):
+ with pytest.raises(MetadataRetrievalError):
fetcher.retrieve_uri('/foo?id=1')
- self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
+ assert self.http.send.call_count == fetcher.RETRY_ATTEMPTS
def test_error_raised_on_non_200_response(self):
self.set_http_responses_to(
@@ -2176,10 +2098,10 @@ class TestContainerMetadataFetcher(unittest.TestCase):
self.fake_response(status_code=404, body=b'Error not found'),
)
fetcher = self.create_fetcher()
- with self.assertRaises(MetadataRetrievalError):
+ with pytest.raises(MetadataRetrievalError):
fetcher.retrieve_uri('/foo?id=1')
# Should have tried up to RETRY_ATTEMPTS.
- self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
+ assert self.http.send.call_count == fetcher.RETRY_ATTEMPTS
def test_error_raised_on_no_json_response(self):
# If the service returns a sucess response but with a body that
@@ -2191,11 +2113,11 @@ class TestContainerMetadataFetcher(unittest.TestCase):
self.fake_response(status_code=200, body=b'Not JSON'),
)
fetcher = self.create_fetcher()
- with self.assertRaises(MetadataRetrievalError) as e:
+ with pytest.raises(MetadataRetrievalError) as e:
fetcher.retrieve_uri('/foo?id=1')
- self.assertNotIn('Not JSON', str(e.exception))
+ assert 'Not JSON' not in str(e.value)
# Should have tried up to RETRY_ATTEMPTS.
- self.assertEqual(self.http.send.call_count, fetcher.RETRY_ATTEMPTS)
+ assert self.http.send.call_count == fetcher.RETRY_ATTEMPTS
def test_can_retrieve_full_uri_with_fixed_ip(self):
self.assert_can_retrieve_metadata_from(
@@ -2234,10 +2156,10 @@ class TestContainerMetadataFetcher(unittest.TestCase):
class TestUnsigned(unittest.TestCase):
def test_copy_returns_same_object(self):
- self.assertIs(botocore.UNSIGNED, copy.copy(botocore.UNSIGNED))
+ assert botocore.UNSIGNED is copy.copy(botocore.UNSIGNED)
def test_deepcopy_returns_same_object(self):
- self.assertIs(botocore.UNSIGNED, copy.deepcopy(botocore.UNSIGNED))
+ assert botocore.UNSIGNED is copy.deepcopy(botocore.UNSIGNED)
class TestInstanceMetadataFetcher(unittest.TestCase):
@@ -2318,14 +2240,14 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
env = {'AWS_EC2_METADATA_DISABLED': 'true'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
self._send.assert_not_called()
def test_disabled_by_environment_mixed_case(self):
env = {'AWS_EC2_METADATA_DISABLED': 'tRuE'}
fetcher = InstanceMetadataFetcher(env=env)
result = fetcher.retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
self._send.assert_not_called()
def test_disabling_env_var_not_true(self):
@@ -2337,7 +2259,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
fetcher = InstanceMetadataFetcher(base_url=url, env=env)
result = fetcher.retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_imds_use_ipv6(self):
configs = [({'imds_use_ipv6': 'true'},'http://[fe80:ec2::254%eth0]/'),
@@ -2408,9 +2330,9 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
- self.assertEqual(self._send.call_count, 3)
+ assert self._send.call_count == 3
for call in self._send.calls:
- self.assertTrue(call[0][0].headers['User-Agent'], user_agent)
+ assert call[0][0].headers['User-Agent'] == user_agent
def test_non_200_response_for_role_name_is_retried(self):
# Response for role name that have a non 200 status code should
@@ -2422,7 +2344,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_http_connection_error_for_role_name_is_retried(self):
# Connection related errors should be retried
@@ -2432,7 +2354,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_empty_response_for_role_name_is_retried(self):
# Response for role name that have a non 200 status code should
@@ -2443,7 +2365,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_non_200_response_is_retried(self):
self.add_get_token_imds_response(token='token')
@@ -2455,7 +2377,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_http_connection_errors_is_retried(self):
self.add_get_token_imds_response(token='token')
@@ -2465,7 +2387,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_empty_response_is_retried(self):
self.add_get_token_imds_response(token='token')
@@ -2476,7 +2398,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_invalid_json_is_retried(self):
self.add_get_token_imds_response(token='token')
@@ -2487,14 +2409,14 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_get_credentials_imds_response()
result = InstanceMetadataFetcher(
num_attempts=2).retrieve_iam_role_credentials()
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_exhaust_retries_on_role_name_request(self):
self.add_get_token_imds_response(token='token')
self.add_imds_response(status_code=400, body=b'')
result = InstanceMetadataFetcher(
num_attempts=1).retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
def test_exhaust_retries_on_credentials_request(self):
self.add_get_token_imds_response(token='token')
@@ -2502,7 +2424,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_imds_response(status_code=400, body=b'')
result = InstanceMetadataFetcher(
num_attempts=1).retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
def test_missing_fields_in_credentials_response(self):
self.add_get_token_imds_response(token='token')
@@ -2512,7 +2434,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
self.add_imds_response(
body=b'{"Code":"AssumeRoleUnauthorizedAccess","Message":"error"}')
result = InstanceMetadataFetcher().retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
def test_token_is_included(self):
user_agent = 'my-user-agent'
@@ -2522,10 +2444,10 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
user_agent=user_agent).retrieve_iam_role_credentials()
# Check that subsequent calls after getting the token include the token.
- self.assertEqual(self._send.call_count, 3)
+ assert self._send.call_count == 3
for call in self._send.call_args_list[1:]:
- self.assertEqual(call[0][0].headers['x-aws-ec2-metadata-token'], 'token')
- self.assertEqual(result, self._expected_creds)
+ assert call[0][0].headers['x-aws-ec2-metadata-token'] == 'token'
+ assert result == self._expected_creds
def test_metadata_token_not_supported_404(self):
user_agent = 'my-user-agent'
@@ -2538,7 +2460,7 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
for call in self._send.call_args_list[1:]:
self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
- self.assertEqual(result, self._expected_creds)
+ assert result == self._expected_creds
def test_metadata_token_not_supported_403(self):
user_agent = 'my-user-agent'
@@ -2550,8 +2472,8 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
- self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
- self.assertEqual(result, self._expected_creds)
+ assert 'x-aws-ec2-metadata-token' not in call[0][0].headers
+ assert result == self._expected_creds
def test_metadata_token_not_supported_405(self):
user_agent = 'my-user-agent'
@@ -2563,8 +2485,8 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
- self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
- self.assertEqual(result, self._expected_creds)
+ assert 'x-aws-ec2-metadata-token' not in call[0][0].headers
+ assert result == self._expected_creds
def test_metadata_token_not_supported_timeout(self):
user_agent = 'my-user-agent'
@@ -2576,8 +2498,8 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
- self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
- self.assertEqual(result, self._expected_creds)
+ assert 'x-aws-ec2-metadata-token' not in call[0][0].headers
+ assert result == self._expected_creds
def test_token_not_supported_exhaust_retries(self):
user_agent = 'my-user-agent'
@@ -2589,15 +2511,15 @@ class TestInstanceMetadataFetcher(unittest.TestCase):
user_agent=user_agent).retrieve_iam_role_credentials()
for call in self._send.call_args_list[1:]:
- self.assertNotIn('x-aws-ec2-metadata-token', call[0][0].headers)
- self.assertEqual(result, self._expected_creds)
+ assert 'x-aws-ec2-metadata-token' not in call[0][0].headers
+ assert result == self._expected_creds
def test_metadata_token_bad_request_yields_no_credentials(self):
user_agent = 'my-user-agent'
self.add_imds_response(b'', status_code=400)
result = InstanceMetadataFetcher(
user_agent=user_agent).retrieve_iam_role_credentials()
- self.assertEqual(result, {})
+ assert result == {}
class TestSSOTokenLoader(unittest.TestCase):
@@ -2616,13 +2538,13 @@ class TestSSOTokenLoader(unittest.TestCase):
def test_can_load_token_exists(self):
self.cache[self.cache_key] = self.cached_token
access_token = self.loader(self.start_url)
- self.assertEqual(self.access_token, access_token)
+ assert self.access_token == access_token
def test_can_handle_does_not_exist(self):
- with self.assertRaises(SSOTokenLoadError):
+ with pytest.raises(SSOTokenLoadError):
access_token = self.loader(self.start_url)
def test_can_handle_invalid_cache(self):
self.cache[self.cache_key] = {}
- with self.assertRaises(SSOTokenLoadError):
+ with pytest.raises(SSOTokenLoadError):
access_token = self.loader(self.start_url)
diff --git a/tests/unit/test_validate.py b/tests/unit/test_validate.py
index 88e809f2..e157735b 100644
--- a/tests/unit/test_validate.py
+++ b/tests/unit/test_validate.py
@@ -22,10 +22,10 @@ class BaseTestValidate(unittest.TestCase):
# Also, this assumes the input shape name is "Input".
errors_found = self.get_validation_error_message(
given_shapes, input_params)
- self.assertTrue(errors_found.has_errors())
+ assert errors_found.has_errors()
error_message = errors_found.generate_report()
for error in errors:
- self.assertIn(error, error_message)
+ assert error in error_message
def get_validation_error_message(self, given_shapes, input_params):
s = ShapeResolver(given_shapes)
@@ -115,7 +115,7 @@ class TestValidateJSONValueTrait(BaseTestValidate):
'json': {'data': [1, 2.3, '3'], 'unicode': u'\u2713'}
})
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_validate_jsonvalue_string(self):
self.shapes = {
@@ -203,14 +203,14 @@ class TestValidateTypes(BaseTestValidate):
given_shapes=self.shapes,
input_params={'Timestamp': datetime.now(),})
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_datetime_accepts_string_timestamp(self):
errors = self.get_validation_error_message(
given_shapes=self.shapes,
input_params={'Timestamp': '2014-01-01 12:00:00'})
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_can_handle_none_datetimes(self):
# This is specifically to test a workaround a bug in dateutil
@@ -220,7 +220,7 @@ class TestValidateTypes(BaseTestValidate):
given_shapes=self.shapes,
input_params={'Timestamp': None})
error_msg = errors.generate_report()
- self.assertIn('Invalid type for parameter Timestamp', error_msg)
+ assert 'Invalid type for parameter Timestamp' in error_msg
class TestValidateRanges(BaseTestValidate):
@@ -292,14 +292,14 @@ class TestValidateRanges(BaseTestValidate):
},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_within_range(self):
errors = self.get_validation_error_message(
given_shapes=self.shapes,
input_params={'Int': 10})
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_string_min_length_contraint(self):
self.assert_has_validation_errors(
@@ -320,7 +320,7 @@ class TestValidateRanges(BaseTestValidate):
},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_list_min_length_constraint(self):
self.assert_has_validation_errors(
@@ -341,7 +341,7 @@ class TestValidateRanges(BaseTestValidate):
},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_only_min_value_specified(self):
# min anx max don't have to both be provided.
@@ -365,7 +365,7 @@ class TestValidateRanges(BaseTestValidate):
},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
class TestValidateMapType(BaseTestValidate):
@@ -432,7 +432,7 @@ class TestValidationFloatType(BaseTestValidate):
given_shapes=self.shapes,
input_params={'Float': decimal.Decimal('2.12345')})
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_decimal_still_validates_range(self):
self.assert_has_validation_errors(
@@ -468,7 +468,7 @@ class TestValidateTypeBlob(BaseTestValidate):
input_params={'Blob': b'12345'}
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_validates_bytearray(self):
errors = self.get_validation_error_message(
@@ -476,7 +476,7 @@ class TestValidateTypeBlob(BaseTestValidate):
input_params={'Blob': bytearray(b'12345')},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_validates_file_like_object(self):
value = six.BytesIO(b'foo')
@@ -486,7 +486,7 @@ class TestValidateTypeBlob(BaseTestValidate):
input_params={'Blob': value},
)
error_msg = errors.generate_report()
- self.assertEqual(error_msg, '')
+ assert error_msg == ''
def test_validate_type(self):
self.assert_has_validation_errors(
diff --git a/tests/unit/test_waiters.py b/tests/unit/test_waiters.py
index 56e5ab0a..53ed2157 100644
--- a/tests/unit/test_waiters.py
+++ b/tests/unit/test_waiters.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
import os
from tests import unittest, BaseEnvVar
+import pytest
from tests import mock
@@ -43,7 +44,7 @@ class TestWaiterModel(unittest.TestCase):
return config.acceptors[0].matcher_func
def test_waiter_version(self):
- self.assertEqual(WaiterModel({'version': 2, 'waiters': {}}).version, 2)
+ assert WaiterModel({'version': 2, 'waiters': {}}).version == 2
def test_wont_load_missing_version_in_config(self):
# We only load waiter configs if we know for sure that we're
@@ -52,7 +53,7 @@ class TestWaiterModel(unittest.TestCase):
# Missing the 'version' key.
'waiters': {}
}
- with self.assertRaises(WaiterConfigError):
+ with pytest.raises(WaiterConfigError):
WaiterModel(waiters)
def test_unsupported_waiter_version(self):
@@ -60,7 +61,7 @@ class TestWaiterModel(unittest.TestCase):
'version': 1,
'waiters': {}
}
- with self.assertRaises(WaiterConfigError):
+ with pytest.raises(WaiterConfigError):
WaiterModel(waiters)
def test_waiter_names(self):
@@ -71,8 +72,8 @@ class TestWaiterModel(unittest.TestCase):
'FooWaiter': {},
}
}
- self.assertEqual(WaiterModel(waiters).waiter_names, ['BarWaiter',
- 'FooWaiter'])
+ assert WaiterModel(waiters).waiter_names == ['BarWaiter',
+ 'FooWaiter']
def test_get_single_waiter_config(self):
single_waiter = {
@@ -93,7 +94,7 @@ class TestWaiterModel(unittest.TestCase):
}
model = WaiterModel(waiters)
config = model.get_waiter('BucketExists')
- self.assertEqual(config.operation, 'HeadBucket')
+ assert config.operation == 'HeadBucket'
def test_get_waiter_does_not_exist(self):
waiters = {
@@ -101,7 +102,7 @@ class TestWaiterModel(unittest.TestCase):
'waiters': {}
}
model = WaiterModel(waiters)
- with self.assertRaises(ValueError):
+ with pytest.raises(ValueError):
model.get_waiter('UnknownWaiter')
def test_single_waiter_config_attributes(self):
@@ -114,10 +115,10 @@ class TestWaiterModel(unittest.TestCase):
],
}
config = SingleWaiterConfig(single_waiter)
- self.assertEqual(config.description, 'Waiter description')
- self.assertEqual(config.operation, 'HeadBucket')
- self.assertEqual(config.delay, 5)
- self.assertEqual(config.max_attempts, 20)
+ assert config.description == 'Waiter description'
+ assert config.operation == 'HeadBucket'
+ assert config.delay == 5
+ assert config.max_attempts == 20
def test_single_waiter_acceptors_built_with_matcher_func(self):
# When the list of acceptors are requested, we actually will transform
@@ -131,10 +132,10 @@ class TestWaiterModel(unittest.TestCase):
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0]
- self.assertEqual(success_acceptor.state, 'success')
- self.assertEqual(success_acceptor.matcher, 'status')
- self.assertEqual(success_acceptor.expected, 200)
- self.assertTrue(callable(success_acceptor.matcher_func))
+ assert success_acceptor.state == 'success'
+ assert success_acceptor.matcher == 'status'
+ assert success_acceptor.expected == 200
+ assert callable(success_acceptor.matcher_func)
def test_single_waiter_acceptor_matches_jmespath(self):
single_waiter = {
@@ -148,10 +149,8 @@ class TestWaiterModel(unittest.TestCase):
success_acceptor = config.acceptors[0].matcher_func
# success_acceptor is a callable that takes a response dict and returns
# True or False.
- self.assertTrue(
- success_acceptor({'Table': {'TableStatus': 'ACCEPTED'}}))
- self.assertFalse(
- success_acceptor({'Table': {'TableStatus': 'CREATING'}}))
+ assert success_acceptor({'Table': {'TableStatus': 'ACCEPTED'}}) is True
+ assert not success_acceptor({'Table': {'TableStatus': 'CREATING'}})
def test_single_waiter_supports_status_code(self):
single_waiter = {
@@ -163,10 +162,8 @@ class TestWaiterModel(unittest.TestCase):
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0].matcher_func
- self.assertTrue(
- success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 200}}))
- self.assertFalse(
- success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 404}}))
+ assert success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 200}}) is True
+ assert not success_acceptor({'ResponseMetadata': {'HTTPStatusCode': 404}})
def test_single_waiter_supports_error(self):
single_waiter = {
@@ -178,10 +175,8 @@ class TestWaiterModel(unittest.TestCase):
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
success_acceptor = config.acceptors[0].matcher_func
- self.assertTrue(
- success_acceptor({'Error': {'Code': 'DoesNotExistError'}}))
- self.assertFalse(
- success_acceptor({'Error': {'Code': 'DoesNotExistErorr'}}))
+ assert success_acceptor({'Error': {'Code': 'DoesNotExistError'}}) is True
+ assert not success_acceptor({'Error': {'Code': 'DoesNotExistErorr'}})
def test_unknown_matcher(self):
unknown_type = 'arbitrary_type'
@@ -193,26 +188,22 @@ class TestWaiterModel(unittest.TestCase):
}
single_waiter.update(self.boiler_plate_config)
config = SingleWaiterConfig(single_waiter)
- with self.assertRaises(WaiterConfigError):
+ with pytest.raises(WaiterConfigError):
config.acceptors
def test_single_waiter_supports_path_all(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
- self.assertTrue(
- matches({'Tables': [{"State": "GOOD"}]}))
- self.assertTrue(
- matches({'Tables': [{"State": "GOOD"}, {"State": "GOOD"}]}))
+ assert matches({'Tables': [{"State": "GOOD"}]}) is True
+ assert matches({'Tables': [{"State": "GOOD"}, {"State": "GOOD"}]}) is True
def test_single_waiter_supports_path_any(self):
matches = self.create_acceptor_function(
for_config={'state': 'failure', 'matcher': 'pathAny',
'argument': 'Tables[].State', 'expected': 'FAIL'})
- self.assertTrue(
- matches({'Tables': [{"State": "FAIL"}]}))
- self.assertTrue(
- matches({'Tables': [{"State": "GOOD"}, {"State": "FAIL"}]}))
+ assert matches({'Tables': [{"State": "FAIL"}]}) is True
+ assert matches({'Tables': [{"State": "GOOD"}, {"State": "FAIL"}]}) is True
def test_waiter_handles_error_responses_with_path_matchers(self):
path_any = self.create_acceptor_function(
@@ -227,41 +218,34 @@ class TestWaiterModel(unittest.TestCase):
for_config={'state': 'success', 'matcher': 'path',
'argument': 'length(Tables) > `0`',
'expected': True})
- self.assertFalse(path_any({'Error': {'Code': 'DoesNotExist'}}))
- self.assertFalse(path_all({'Error': {'Code': 'DoesNotExist'}}))
- self.assertFalse(path({'Error': {'Code': 'DoesNotExist'}}))
+ assert not path_any({'Error': {'Code': 'DoesNotExist'}})
+ assert not path_all({'Error': {'Code': 'DoesNotExist'}})
+ assert not path({'Error': {'Code': 'DoesNotExist'}})
def test_single_waiter_does_not_match_path_all(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
- self.assertFalse(
- matches({'Tables': [{"State": "GOOD"}, {"State": "BAD"}]}))
- self.assertFalse(
- matches({'Tables': [{"State": "BAD"}, {"State": "GOOD"}]}))
- self.assertFalse(
- matches({'Tables': [{"State": "BAD"}, {"State": "BAD"}]}))
- self.assertFalse(
- matches({'Tables': []}))
- self.assertFalse(
- matches({'Tables': [{"State": "BAD"},
+ assert not matches({'Tables': [{"State": "GOOD"}, {"State": "BAD"}]})
+ assert not matches({'Tables': [{"State": "BAD"}, {"State": "GOOD"}]})
+ assert not matches({'Tables': [{"State": "BAD"}, {"State": "BAD"}]})
+ assert not matches({'Tables': []})
+ assert not matches({'Tables': [{"State": "BAD"},
{"State": "BAD"},
{"State": "BAD"},
- {"State": "BAD"}]}))
+ {"State": "BAD"}]})
def test_path_all_missing_field(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
- self.assertFalse(
- matches({'Tables': [{"NotState": "GOOD"}, {"NotState": "BAD"}]}))
+ assert not matches({'Tables': [{"NotState": "GOOD"}, {"NotState": "BAD"}]})
def test_path_all_matcher_does_not_receive_list(self):
matches = self.create_acceptor_function(
for_config={'state': 'success', 'matcher': 'pathAll',
'argument': 'Tables[].State', 'expected': 'GOOD'})
- self.assertFalse(
- matches({"NotTables": []}))
+ assert not matches({"NotTables": []})
def test_single_waiter_supports_all_three_states(self):
single_waiter = {
@@ -280,9 +264,9 @@ class TestWaiterModel(unittest.TestCase):
# Each acceptors should be able to handle not matching
# any type of response.
matches_nothing = {}
- self.assertFalse(acceptors[0].matcher_func(matches_nothing))
- self.assertFalse(acceptors[1].matcher_func(matches_nothing))
- self.assertFalse(acceptors[2].matcher_func(matches_nothing))
+ assert not acceptors[0].matcher_func(matches_nothing)
+ assert not acceptors[1].matcher_func(matches_nothing)
+ assert not acceptors[2].matcher_func(matches_nothing)
class TestWaitersObjects(unittest.TestCase):
@@ -327,7 +311,7 @@ class TestWaitersObjects(unittest.TestCase):
for_operation=operation_method
)
waiter.wait()
- self.assertEqual(operation_method.call_count, 3)
+ assert operation_method.call_count == 3
def test_waiter_never_matches(self):
# Verify that a matcher will fail after max_attempts
@@ -341,7 +325,7 @@ class TestWaitersObjects(unittest.TestCase):
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait()
def test_unspecified_errors_stops_waiter(self):
@@ -358,7 +342,7 @@ class TestWaitersObjects(unittest.TestCase):
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait()
def test_last_response_available_on_waiter_error(self):
@@ -368,9 +352,9 @@ class TestWaitersObjects(unittest.TestCase):
self.client_responses_are(last_response,
for_operation=operation_method)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaises(WaiterError) as e:
+ with pytest.raises(WaiterError) as e:
waiter.wait()
- self.assertEqual(e.exception.last_response, last_response)
+ assert e.value.last_response == last_response
def test_unspecified_errors_propagate_error_code(self):
# If a waiter receives an error response, then the
@@ -389,7 +373,7 @@ class TestWaitersObjects(unittest.TestCase):
)
waiter = Waiter('MyWaiter', config, operation_method)
- with six.assertRaisesRegex(self, WaiterError, error_message):
+ with pytest.raises(WaiterError, match=error_message):
waiter.wait()
def _assert_failure_state_error_raised(self, acceptors, responses, expected_msg):
@@ -459,12 +443,12 @@ class TestWaitersObjects(unittest.TestCase):
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait()
# Not only should we raise an exception, but we should have
# only called the operation_method twice because the second
# response triggered a fast fail.
- self.assertEqual(operation_method.call_count, 2)
+ assert operation_method.call_count == 2
def test_waiter_handles_retry_state(self):
acceptor_with_retry_state = [
@@ -484,7 +468,7 @@ class TestWaitersObjects(unittest.TestCase):
)
waiter = Waiter('MyWaiter', config, operation_method)
waiter.wait()
- self.assertEqual(operation_method.call_count, 3)
+ assert operation_method.call_count == 3
def test_kwargs_are_passed_through(self):
acceptors = [
@@ -517,12 +501,12 @@ class TestWaitersObjects(unittest.TestCase):
for_operation=operation_method
)
waiter = Waiter('MyWaiter', config, operation_method)
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait()
# We attempt three times, which means we need to sleep
# twice, once before each subsequent request.
- self.assertEqual(sleep_mock.call_count, 2)
+ assert sleep_mock.call_count == 2
sleep_mock.assert_called_with(delay_time)
@mock.patch('time.sleep')
@@ -537,12 +521,12 @@ class TestWaitersObjects(unittest.TestCase):
)
waiter = Waiter('MyWaiter', config, operation_method)
custom_delay = 3
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait(WaiterConfig={'Delay': custom_delay})
# We attempt three times, which means we need to sleep
# twice, once before each subsequent request.
- self.assertEqual(sleep_mock.call_count, 2)
+ assert sleep_mock.call_count == 2
sleep_mock.assert_called_with(custom_delay)
def test_waiter_invocation_config_honors_max_attempts(self):
@@ -555,10 +539,10 @@ class TestWaitersObjects(unittest.TestCase):
)
waiter = Waiter('MyWaiter', config, operation_method)
custom_max = 2
- with self.assertRaises(WaiterError):
+ with pytest.raises(WaiterError):
waiter.wait(WaiterConfig={'MaxAttempts': custom_max})
- self.assertEqual(operation_method.call_count, 2)
+ assert operation_method.call_count == 2
class TestCreateWaiter(unittest.TestCase):
@@ -609,16 +593,13 @@ class TestCreateWaiter(unittest.TestCase):
waiter_name = 'WaiterName'
waiter = create_waiter_with_client(
waiter_name, self.waiter_model, self.client)
- self.assertIsInstance(waiter, Waiter)
+ assert isinstance(waiter, Waiter)
def test_waiter_class_name(self):
waiter_name = 'WaiterName'
waiter = create_waiter_with_client(
waiter_name, self.waiter_model, self.client)
- self.assertEqual(
- waiter.__class__.__name__,
- 'MyService.Waiter.WaiterName'
- )
+ assert waiter.__class__.__name__ == 'MyService.Waiter.WaiterName'
def test_waiter_help_documentation(self):
waiter_name = 'WaiterName'
@@ -641,7 +622,7 @@ class TestCreateWaiter(unittest.TestCase):
' :returns: None',
]
for line in lines:
- self.assertIn(line, content)
+ assert line in content
class TestOperationMethods(unittest.TestCase):
@@ -663,7 +644,7 @@ class TestOperationMethods(unittest.TestCase):
exception = ClientError(parsed_response, 'OperationName')
client_method.side_effect = exception
actual_response = op(Foo='a', Bar='b')
- self.assertEqual(actual_response, parsed_response)
+ assert actual_response == parsed_response
class ServiceWaiterFunctionalTest(BaseEnvVar):
@@ -714,7 +695,7 @@ class CloudFrontWaitersTest(ServiceWaiterFunctionalTest):
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
- self.assertEqual(self.client.get_distribution.call_count, 1)
+ assert self.client.get_distribution.call_count == 1
def assert_invalidation_completed_call_count(self, api_version=None):
waiter_name = 'InvalidationCompleted'
@@ -727,7 +708,7 @@ class CloudFrontWaitersTest(ServiceWaiterFunctionalTest):
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
- self.assertEqual(self.client.get_invalidation.call_count, 1)
+ assert self.client.get_invalidation.call_count == 1
def assert_streaming_distribution_deployed_call_count(
self, api_version=None):
@@ -741,7 +722,7 @@ class CloudFrontWaitersTest(ServiceWaiterFunctionalTest):
waiter = create_waiter_with_client(waiter_name, waiter_model,
self.client)
waiter.wait()
- self.assertEqual(self.client.get_streaming_distribution.call_count, 1)
+ assert self.client.get_streaming_distribution.call_count == 1
def test_distribution_deployed(self):
# Test the latest version.
--
2.29.2
From 8f77f9234b6a07921cc811a9226a726c08179d16 Mon Sep 17 00:00:00 2001
From: Jordan Guymon <jogu.sd@gmail.com>
Date: Fri, 13 Nov 2020 11:28:21 -0800
Subject: [PATCH 10/14] Purge nose usage after rebase
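The rebase left a few nose helper calls (assert_equal, assert_true,
assert_raises) in tests/functional/test_s3_control_redirects.py; they are
replaced with bare assert statements, which pytest's assertion rewriting
reports just as clearly on failure. A minimal sketch of the pattern, with
illustrative values only:

    expected_netloc = 's3-control.us-west-2.amazonaws.com'  # illustrative
    actual_netloc = 's3-control.us-west-2.amazonaws.com'

    # was: assert_equal(expected_netloc, actual_netloc)
    assert expected_netloc == actual_netloc
    # was: assert_true('amazonaws' in actual_netloc)
    assert 'amazonaws' in actual_netloc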
---
tests/functional/test_s3_control_redirects.py | 11 +++++------
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py
index 7dcf7f8a..566a0c2d 100644
--- a/tests/functional/test_s3_control_redirects.py
+++ b/tests/functional/test_s3_control_redirects.py
@@ -12,7 +12,6 @@
# language governing permissions and limitations under the License.
import re
from contextlib import contextmanager
-from nose.tools import assert_equal, assert_true, assert_raises
from tests import unittest, mock, BaseSessionTest, ClientHTTPStubber
from botocore import exceptions
@@ -283,22 +282,22 @@ def _assert_signing_name(stubber, expected_name):
request = stubber.requests[0]
auth_header = request.headers['Authorization'].decode('utf-8')
actual_name = V4_AUTH_REGEX.match(auth_header).group('name')
- assert_equal(expected_name, actual_name)
+ assert expected_name == actual_name
def _assert_netloc(stubber, expected_netloc):
request = stubber.requests[0]
url_parts = urlsplit(request.url)
- assert_equal(expected_netloc, url_parts.netloc)
+ assert expected_netloc == url_parts.netloc
def _assert_header(stubber, key, value):
request = stubber.requests[0]
- assert_true(key in request.headers)
+ assert key in request.headers
actual_value = request.headers[key]
if isinstance(actual_value, bytes):
actual_value = actual_value.decode('utf-8')
- assert_equal(value, actual_value)
+ assert value == actual_value
def _assert_headers(stubber, headers):
@@ -372,7 +371,7 @@ def _assert_test_case(test_case, client, stubber):
) % (exception_cls, type(exception_raised))
assert isinstance(exception_raised, exception_cls), error_msg
else:
- assert_equal(len(stubber.requests), 1)
+ assert len(stubber.requests) == 1
if 'signing_name' in assertions:
_assert_signing_name(stubber, assertions['signing_name'])
if 'headers' in assertions:
--
2.29.2
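For the module-level helpers above, the removed nose.tools functions map onto bare assert statements and pytest.raises. A minimal illustration with generic values, not taken from the test file:

    import pytest

    def test_nose_helper_equivalents():
        value = 'us-east-1'
        # assert_equal(expected, actual)   ->  assert expected == actual
        assert 'us-east-1' == value
        # assert_true(expr)                ->  assert expr
        assert value.startswith('us-')
        # assert_raises(ValueError, func)  ->  with pytest.raises(ValueError): func()
        with pytest.raises(ValueError):
            int('not-a-number')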
From 214bb1486da0a9cc47acde34b604dc4f2c696d49 Mon Sep 17 00:00:00 2001
From: Jordan Guymon <jogu.sd@gmail.com>
Date: Fri, 13 Nov 2020 11:40:30 -0800
Subject: [PATCH 11/14] Fix botched pytest parameterization
---
tests/functional/test_paginator_config.py | 2 --
1 file changed, 2 deletions(-)
diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py
index 227519eb..3b84e014 100644
--- a/tests/functional/test_paginator_config.py
+++ b/tests/functional/test_paginator_config.py
@@ -132,7 +132,6 @@ KNOWN_EXTRA_OUTPUT_KEYS = [
def _generate_page_configs():
- page_configs = []
session = botocore.session.get_session()
loader = session.get_component('data_loader')
services = loader.list_available_services('paginators-1')
@@ -142,7 +141,6 @@ def _generate_page_configs():
'paginators-1',
service_model.api_version)
yield (service_model, page_config['pagination'])
- return page_configs
@pytest.mark.parametrize('service_model, pagination', _generate_page_configs())
--
2.29.2
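The bug fixed here is that _generate_page_configs both yielded cases and tried to build and return a list; in a generator function the trailing return value is never seen by @pytest.mark.parametrize, which simply iterates the generator at collection time, so the list-building lines were dead code. A small sketch of the intended shape, using hypothetical case data:

    import pytest

    def _generate_cases():
        # Yield only; do not accumulate into a list or return one.
        for service_name, page_count in [('s3', 3), ('ec2', 5)]:
            yield service_name, page_count

    @pytest.mark.parametrize('service_name, page_count', _generate_cases())
    def test_case_shape(service_name, page_count):
        assert isinstance(service_name, str)
        assert page_count > 0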
From 961f291b7d9ad47e9b4453bf1cb0aa5e4fd00ff1 Mon Sep 17 00:00:00 2001
From: Jordan Guymon <jogu.sd@gmail.com>
Date: Fri, 13 Nov 2020 11:59:23 -0800
Subject: [PATCH 12/14] Fix credential test on Python 2.7
---
tests/unit/test_credentials.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/unit/test_credentials.py b/tests/unit/test_credentials.py
index 1c8d914f..47d3553a 100644
--- a/tests/unit/test_credentials.py
+++ b/tests/unit/test_credentials.py
@@ -74,8 +74,8 @@ def path(filename):
class TestCredentials(BaseEnvVar):
def _ensure_credential_is_normalized_as_unicode(self, access, secret):
c = credentials.Credentials(access, secret)
- assert isinstance(c.access_key, str)
- assert isinstance(c.secret_key, str)
+ assert isinstance(c.access_key, six.text_type)
+ assert isinstance(c.secret_key, six.text_type)
def test_detect_nonascii_character(self):
self._ensure_credential_is_normalized_as_unicode(
--
2.29.2
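The plain str check only holds on Python 3; on Python 2.7 a decoded credential is unicode, not str, so the test needs six.text_type, which aliases unicode on Python 2 and str on Python 3. A standalone illustration of the same idea (normalize() is a stand-in, not botocore's code):

    import six

    def normalize(value):
        if isinstance(value, bytes):
            return value.decode('utf-8')
        return value

    def test_normalized_values_are_text():
        assert isinstance(normalize(b'akid'), six.text_type)
        assert isinstance(normalize(u'akid'), six.text_type)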
From 055bb13384b3425c80ff87ceead4dcc80da3bd85 Mon Sep 17 00:00:00 2001
From: Jordan Guymon <jogu.sd@gmail.com>
Date: Fri, 13 Nov 2020 14:30:12 -0800
Subject: [PATCH 13/14] Finish pytest parameterization of unit tests
---
tests/unit/auth/test_sigv4.py | 19 ++--
tests/unit/protocols/input/rest-json.json | 1 +
tests/unit/retries/test_standard.py | 49 +++++----
tests/unit/test_compat.py | 127 ++++++++++++----------
tests/unit/test_eventstream.py | 26 ++---
tests/unit/test_model.py | 36 +++---
tests/unit/test_parsers.py | 24 ++--
tests/unit/test_protocols.py | 43 +++++---
8 files changed, 176 insertions(+), 149 deletions(-)
diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py
index a4e35aea..67597c24 100644
--- a/tests/unit/auth/test_sigv4.py
+++ b/tests/unit/auth/test_sigv4.py
@@ -25,6 +25,7 @@ import os
import logging
import io
import datetime
+import pytest
from botocore.compat import six
from tests import mock
@@ -88,7 +89,16 @@ class RawHTTPRequest(six.moves.BaseHTTPServer.BaseHTTPRequestHandler):
self.error_message = message
-def test_generator():
+def _load_test_cases():
+ cases = set(os.path.splitext(i)[0] for i in os.listdir(TESTSUITE_DIR))
+ for case in cases:
+ if case in TESTS_TO_IGNORE:
+ continue
+ yield case
+
+
+@pytest.mark.parametrize("test_case", _load_test_cases())
+def test_generator(test_case):
datetime_patcher = mock.patch.object(
botocore.auth.datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
@@ -100,12 +110,7 @@ def test_generator():
# We have to change this because Sep 9, 2011 was actually
# a Friday, but the tests have this set to a Monday.
formatdate.return_value = 'Mon, 09 Sep 2011 23:36:00 GMT'
- for test_case in set(os.path.splitext(i)[0]
- for i in os.listdir(TESTSUITE_DIR)):
- if test_case in TESTS_TO_IGNORE:
- log.debug("Skipping test: %s", test_case)
- continue
- _test_signature_version_4(test_case)
+ _test_signature_version_4(test_case)
datetime_patcher.stop()
formatdate_patcher.stop()
diff --git a/tests/unit/protocols/input/rest-json.json b/tests/unit/protocols/input/rest-json.json
index 44f7eea7..87c1438f 100644
--- a/tests/unit/protocols/input/rest-json.json
+++ b/tests/unit/protocols/input/rest-json.json
@@ -15,6 +15,7 @@
},
"name": "OperationName"
},
+ "params": {},
"serialized": {
"body": "",
"uri": "/2014-01-01/jobs",
diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py
index 9879578d..deb4cb59 100644
--- a/tests/unit/retries/test_standard.py
+++ b/tests/unit/retries/test_standard.py
@@ -1,3 +1,5 @@
+import pytest
+import itertools
from tests import unittest
from tests import mock
@@ -150,39 +152,46 @@ SERVICE_DESCRIPTION_WITH_RETRIES = {
}
-def test_can_detect_retryable_transient_errors():
+@pytest.mark.parametrize("case", RETRYABLE_TRANSIENT_ERRORS)
+def test_can_detect_retryable_transient_errors(case):
transient_checker = standard.TransientRetryableChecker()
- for case in RETRYABLE_TRANSIENT_ERRORS:
- _verify_retryable(transient_checker, None, *case)
+ _verify_retryable(transient_checker, None, *case)
-def test_can_detect_retryable_throttled_errors():
+@pytest.mark.parametrize("case", RETRYABLE_THROTTLED_RESPONSES)
+def test_can_detect_retryable_throttled_errors(case):
throttled_checker = standard.ThrottledRetryableChecker()
- for case in RETRYABLE_THROTTLED_RESPONSES:
- _verify_retryable(throttled_checker, None, *case)
+ _verify_retryable(throttled_checker, None, *case)
-def test_can_detect_modeled_retryable_errors():
+@pytest.mark.parametrize("case", RETRYABLE_MODELED_ERRORS)
+def test_can_detect_modeled_retryable_errors(case):
modeled_retry_checker = standard.ModeledRetryableChecker()
- for case in RETRYABLE_MODELED_ERRORS:
- _verify_retryable(modeled_retry_checker,
- get_operation_model_with_retries(), *case)
+ op_model = get_operation_model_with_retries()
+ _verify_retryable(modeled_retry_checker, op_model, *case)
+
+
+def _all_generic_cases():
+ all_cases = itertools.chain(
+ RETRYABLE_TRANSIENT_ERRORS,
+ RETRYABLE_THROTTLED_RESPONSES,
+ RETRYABLE_MODELED_ERRORS,
+ )
+ for case in all_cases:
+ # It's possible that cases that are retryable for an individual checker
+ # are retryable for a different checker. We need to filter out all
+ # the False cases.
+ if case[2]:
+ yield case
-def test_standard_retry_conditions():
+@pytest.mark.parametrize("case", _all_generic_cases())
+def test_standard_retry_conditions(case):
# This is verifying that the high level object used for checking
# retry conditions still handles all the individual testcases.
standard_checker = standard.StandardRetryConditions()
op_model = get_operation_model_with_retries()
- all_cases = (
- RETRYABLE_TRANSIENT_ERRORS + RETRYABLE_THROTTLED_RESPONSES +
- RETRYABLE_MODELED_ERRORS)
- # It's possible that cases that are retryable for an individual checker
- # are retryable for a different checker. We need to filter out all
- # the False cases.
- all_cases = [c for c in all_cases if c[2]]
- for case in all_cases:
- _verify_retryable(standard_checker, op_model, *case)
+ _verify_retryable(standard_checker, op_model, *case)
def get_operation_model_with_retries():
diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py
index a4e1066f..0beae31f 100644
--- a/tests/unit/test_compat.py
+++ b/tests/unit/test_compat.py
@@ -13,6 +13,7 @@
import datetime
from tests import mock
import pytest
+import itertools
from botocore.exceptions import MD5UnavailableError
from botocore.compat import (
@@ -97,67 +98,75 @@ class TestGetMD5(unittest.TestCase):
get_md5()
-class TestCompatShellSplit(unittest.TestCase):
- def test_compat_shell_split_windows(self):
- windows_cases = {
- r'': [],
- r'spam \\': [r'spam', '\\\\'],
- r'spam ': [r'spam'],
- r' spam': [r'spam'],
- 'spam eggs': [r'spam', r'eggs'],
- 'spam\teggs': [r'spam', r'eggs'],
- 'spam\neggs': ['spam\neggs'],
- '""': [''],
- '" "': [' '],
- '"\t"': ['\t'],
- '\\\\': ['\\\\'],
- '\\\\ ': ['\\\\'],
- '\\\\\t': ['\\\\'],
- r'\"': ['"'],
- # The following four test cases are official test cases given in
- # Microsoft's documentation.
- r'"abc" d e': [r'abc', r'd', r'e'],
- r'a\\b d"e f"g h': [r'a\\b', r'de fg', r'h'],
- r'a\\\"b c d': [r'a\"b', r'c', r'd'],
- r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
- }
- for input_string, expected_output in windows_cases.items():
- assert compat_shell_split(input_string, "win32") == expected_output
+WINDOWS_SHELL_SPLIT_CASES = {
+ r'': [],
+ r'spam \\': [r'spam', '\\\\'],
+ r'spam ': [r'spam'],
+ r' spam': [r'spam'],
+ 'spam eggs': [r'spam', r'eggs'],
+ 'spam\teggs': [r'spam', r'eggs'],
+ 'spam\neggs': ['spam\neggs'],
+ '""': [''],
+ '" "': [' '],
+ '"\t"': ['\t'],
+ '\\\\': ['\\\\'],
+ '\\\\ ': ['\\\\'],
+ '\\\\\t': ['\\\\'],
+ r'\"': ['"'],
+ # The following four test cases are official test cases given in
+ # Microsoft's documentation.
+ r'"abc" d e': [r'abc', r'd', r'e'],
+ r'a\\b d"e f"g h': [r'a\\b', r'de fg', r'h'],
+ r'a\\\"b c d': [r'a\"b', r'c', r'd'],
+ r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
+}
+
+
+@pytest.mark.parametrize(
+ "input_string, expected_output",
+ WINDOWS_SHELL_SPLIT_CASES.items(),
+)
+def test_compat_shell_split_windows(input_string, expected_output):
+ assert compat_shell_split(input_string, "win32") == expected_output
+
+
+UNIX_SHELL_SPLIT_CASES = {
+ r'': [],
+ r'spam \\': [r'spam', '\\'],
+ r'spam ': [r'spam'],
+ r' spam': [r'spam'],
+ 'spam eggs': [r'spam', r'eggs'],
+ 'spam\teggs': [r'spam', r'eggs'],
+ 'spam\neggs': ['spam', 'eggs'],
+ '""': [''],
+ '" "': [' '],
+ '"\t"': ['\t'],
+ '\\\\': ['\\'],
+ '\\\\ ': ['\\'],
+ '\\\\\t': ['\\'],
+ r'\"': ['"'],
+ # The following four test cases are official test cases given in
+ # Microsoft's documentation, but adapted to unix shell splitting.
+ r'"abc" d e': [r'abc', r'd', r'e'],
+ r'a\\b d"e f"g h': [r'a\b', r'de fg', r'h'],
+ r'a\\\"b c d': [r'a\"b', r'c', r'd'],
+ r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
+}
+
+
+@pytest.mark.parametrize(
+ "platform, test_case",
+ itertools.product(["linux2", "darwin"], UNIX_SHELL_SPLIT_CASES.items()),
+)
+def test_compat_shell_split_unix(platform, test_case):
+ input_string, expected_output = test_case
+ assert compat_shell_split(input_string, platform) == expected_output
- with pytest.raises(ValueError):
- compat_shell_split(r'"', "win32")
-
- def test_compat_shell_split_unix(self):
- unix_cases = {
- r'': [],
- r'spam \\': [r'spam', '\\'],
- r'spam ': [r'spam'],
- r' spam': [r'spam'],
- 'spam eggs': [r'spam', r'eggs'],
- 'spam\teggs': [r'spam', r'eggs'],
- 'spam\neggs': ['spam', 'eggs'],
- '""': [''],
- '" "': [' '],
- '"\t"': ['\t'],
- '\\\\': ['\\'],
- '\\\\ ': ['\\'],
- '\\\\\t': ['\\'],
- r'\"': ['"'],
- # The following four test cases are official test cases given in
- # Microsoft's documentation, but adapted to unix shell splitting.
- r'"abc" d e': [r'abc', r'd', r'e'],
- r'a\\b d"e f"g h': [r'a\b', r'de fg', r'h'],
- r'a\\\"b c d': [r'a\"b', r'c', r'd'],
- r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e']
- }
- for input_string, expected_output in unix_cases.items():
- assert compat_shell_split(input_string, "linux2") == expected_output
- assert compat_shell_split(input_string, "darwin") == expected_output
- with pytest.raises(ValueError):
- compat_shell_split(r'"', "linux2")
- with pytest.raises(ValueError):
- compat_shell_split(r'"', "darwin")
+@pytest.mark.parametrize("platform", ["linux2", "darwin", "win32"])
+def test_compat_shell_split_error(platform):
+ with pytest.raises(ValueError):
+ compat_shell_split(r'"', platform)
class TestTimezoneOperations(unittest.TestCase):
diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py
index 45336bdc..05e06e8c 100644
--- a/tests/unit/test_eventstream.py
+++ b/tests/unit/test_eventstream.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
"""Unit tests for the binary event stream decoder. """
+import pytest
from tests import mock
from botocore.parsers import EventStreamXMLParser
@@ -270,10 +271,9 @@ def check_message_decodes(encoded, decoded):
assert_message_equal(messages[0], decoded)
-def test_positive_cases():
- """Test that all positive cases decode how we expect. """
- for (encoded, decoded) in POSITIVE_CASES:
- check_message_decodes(encoded, decoded)
+@pytest.mark.parametrize("encoded, decoded", POSITIVE_CASES)
+def test_positive_cases(encoded, decoded):
+ check_message_decodes(encoded, decoded)
def test_all_positive_cases():
@@ -291,16 +291,16 @@ def test_all_positive_cases():
assert_message_equal(expected, decoded)
-def test_negative_cases():
+@pytest.mark.parametrize("encoded, exception", NEGATIVE_CASES)
+def test_negative_cases(encoded, exception):
"""Test that all negative cases raise the expected exception. """
- for (encoded, exception) in NEGATIVE_CASES:
- try:
- check_message_decodes(encoded, None)
- except exception:
- pass
- else:
- raise AssertionError(
- 'Expected exception {!s} has not been raised.'.format(exception))
+ try:
+ check_message_decodes(encoded, None)
+ except exception:
+ pass
+ else:
+ error_msg = 'Expected exception {!s} has not been raised.'
+ raise AssertionError(error_msg.format(exception))
def test_header_parser():
diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py
index 65243bfb..88fd7625 100644
--- a/tests/unit/test_model.py
+++ b/tests/unit/test_model.py
@@ -6,30 +6,26 @@ from botocore.compat import OrderedDict
from botocore.compat import six
-def test_missing_model_attribute_raises_exception():
+@pytest.mark.parametrize("attr_name", ['api_version', 'protocol'])
+def test_missing_model_attribute_raises_exception(attr_name):
# We're using a test generator here to cut down
# on the duplication. The property names below
# all have the same test logic.
service_model = model.ServiceModel({'metadata': {'endpointPrefix': 'foo'}})
- property_names = ['api_version', 'protocol']
-
- def _test_attribute_raise_exception(attr_name):
- try:
- getattr(service_model, attr_name)
- except model.UndefinedModelAttributeError:
- # This is what we expect, so the test passes.
- pass
- except Exception as e:
- raise AssertionError("Expected UndefinedModelAttributeError to "
- "be raised, but %s was raised instead" %
- (e.__class__))
- else:
- raise AssertionError(
- "Expected UndefinedModelAttributeError to "
- "be raised, but no exception was raised for: %s" % attr_name)
-
- for name in property_names:
- _test_attribute_raise_exception(name)
+
+ try:
+ getattr(service_model, attr_name)
+ except model.UndefinedModelAttributeError:
+ # This is what we expect, so the test passes.
+ pass
+ except Exception as e:
+ raise AssertionError("Expected UndefinedModelAttributeError to "
+ "be raised, but %s was raised instead" %
+ (e.__class__))
+ else:
+ raise AssertionError(
+ "Expected UndefinedModelAttributeError to "
+ "be raised, but no exception was raised for: %s" % attr_name)
class TestServiceId(unittest.TestCase):
diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py
index 9b2116ab..fa2c1cb1 100644
--- a/tests/unit/test_parsers.py
+++ b/tests/unit/test_parsers.py
@@ -12,6 +12,7 @@
# language governing permissions and limitations under the License.
from tests import unittest, RawResponse
import datetime
+import itertools
import pytest
from dateutil.tz import tzutc
@@ -1292,23 +1293,20 @@ class TestParseErrorResponses(unittest.TestCase):
assert error['Message'] == ''
-def test_can_handle_generic_error_message():
- # There are times when you can get a service to respond with a generic
- # html error page. We should be able to handle this case.
- for parser_cls in parsers.PROTOCOL_PARSERS.values():
- generic_html_body = (
- '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
- ).encode('utf-8')
- empty_body = b''
- none_body = None
- _assert_parses_generic_error, parser_cls(), generic_html_body
- _assert_parses_generic_error, parser_cls(), empty_body
- _assert_parses_generic_error, parser_cls(), none_body
+PARSER_CLASSES = parsers.PROTOCOL_PARSERS.values()
+GENERIC_ERROR_BODY = (
+ '<html><body><b>Http/1.1 Service Unavailable</b></body></html>'
+).encode('utf-8')
-def _assert_parses_generic_error(parser, body):
+@pytest.mark.parametrize(
+ "parser_cls, body",
+ itertools.product(PARSER_CLASSES, [b'', None, GENERIC_ERROR_BODY]),
+)
+def test_can_handle_generic_error_message(parser_cls, body):
# There are times when you can get a service to respond with a generic
# html error page. We should be able to handle this case.
+ parser = parser_cls()
parsed = parser.parse({
'body': body, 'headers': {}, 'status_code': 503}, None)
assert parsed['Error'] == {
diff --git a/tests/unit/test_protocols.py b/tests/unit/test_protocols.py
index b6a7ca0e..7ca89d40 100644
--- a/tests/unit/test_protocols.py
+++ b/tests/unit/test_protocols.py
@@ -52,6 +52,7 @@ can set the BOTOCORE_TEST_ID env var with the ``suite_id:test_id`` syntax.
"""
import os
import copy
+import pytest
from base64 import b64decode
from dateutil.tz import tzutc
@@ -92,16 +93,24 @@ PROTOCOL_TEST_BLACKLIST = [
]
-def test_compliance():
- for full_path in _walk_files():
- if full_path.endswith('.json'):
- for model, case, basename in _load_cases(full_path):
- if model.get('description') in PROTOCOL_TEST_BLACKLIST:
- continue
- if 'params' in case:
- _test_input(model, case, basename)
- elif 'response' in case:
- _test_output(model, case, basename)
+def _load_case_files():
+ for full_path in _walk_files():
+ if full_path.endswith('.json'):
+ for model, case, basename in _load_cases(full_path):
+ if model.get('description') in PROTOCOL_TEST_BLACKLIST:
+ continue
+ yield model, case, basename
+
+
+@pytest.mark.parametrize("model, case, basename", _load_case_files())
+def test_compliance(model, case, basename):
+ if 'params' in case:
+ _test_input(model, case, basename)
+ elif 'response' in case:
+ _test_output(model, case, basename)
+ else:
+ fmt = (model.get('description'), basename)
+ raise RuntimeError("Invalid case: '%s' in %s" % fmt)
def _test_input(json_description, case, basename):
@@ -140,7 +149,7 @@ def _assert_endpoints_equal(actual, expected, endpoint):
return
prepare_request_dict(actual, endpoint)
actual_host = urlsplit(actual['url']).netloc
- rich_assert_equal(actual_host, expected['host'], 'Host')
+ assert_equal(actual_host, expected['host'], 'Host')
class MockRawResponse(object):
@@ -206,7 +215,7 @@ def _test_output(json_description, case, basename):
expected_result.update(case['error'])
else:
expected_result = case['result']
- rich_assert_equal(parsed, expected_result, "Body")
+ assert_equal(parsed, expected_result, "Body")
except Exception as e:
_output_failure_message(model.metadata['protocol'],
case, parsed, expected_result, e)
@@ -316,7 +325,7 @@ def _try_json_dump(obj):
return str(obj)
-def rich_assert_equal(first, second, prefix):
+def assert_equal(first, second, prefix):
# A better assert equals. It allows you to just provide
# prefix instead of the entire message.
try:
@@ -351,14 +360,14 @@ def _serialize_request_description(request_dict):
def _assert_requests_equal(actual, expected):
- rich_assert_equal(actual['body'], expected.get('body', '').encode('utf-8'),
+ assert_equal(actual['body'], expected.get('body', '').encode('utf-8'),
'Body value')
actual_headers = dict(actual['headers'])
expected_headers = expected.get('headers', {})
- rich_assert_equal(actual_headers, expected_headers, "Header values")
- rich_assert_equal(actual['url_path'], expected.get('uri', ''), "URI")
+ assert_equal(actual_headers, expected_headers, "Header values")
+ assert_equal(actual['url_path'], expected.get('uri', ''), "URI")
if 'method' in expected:
- rich_assert_equal(actual['method'], expected['method'], "Method")
+ assert_equal(actual['method'], expected['method'], "Method")
def _walk_files():
--
2.29.2
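Several of the conversions above (shell splitting, generic error parsing) use itertools.product to replace nested for-loops with a single cross-product parametrization. A self-contained sketch of that pattern with made-up data:

    import itertools
    import pytest

    PLATFORMS = ['linux2', 'darwin']
    SPLIT_CASES = {'spam eggs': ['spam', 'eggs'], '""': ['']}

    @pytest.mark.parametrize(
        'platform, case',
        itertools.product(PLATFORMS, SPLIT_CASES.items()),
    )
    def test_every_platform_sees_every_case(platform, case):
        input_string, expected_output = case
        # Each (platform, case) pair becomes its own collected test item.
        assert platform in PLATFORMS
        assert isinstance(expected_output, list)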
From f018c60c14787f681492c07557ff533b0969db9f Mon Sep 17 00:00:00 2001
From: Jordan Guymon <jogu.sd@gmail.com>
Date: Fri, 13 Nov 2020 15:48:15 -0800
Subject: [PATCH 14/14] Finish pytest parameterization of functional tests
---
.../docs/test_shared_example_config.py | 12 +-
tests/functional/test_alias.py | 18 +-
tests/functional/test_client_class_names.py | 20 +-
tests/functional/test_endpoints.py | 21 +-
tests/functional/test_model_completeness.py | 36 +-
tests/functional/test_paginator_config.py | 18 +-
tests/functional/test_public_apis.py | 31 +-
tests/functional/test_regions.py | 24 +-
tests/functional/test_s3.py | 389 +++++++++---------
tests/functional/test_s3_control_redirects.py | 21 +-
tests/functional/test_six_imports.py | 27 +-
tests/functional/test_waiter_config.py | 44 +-
12 files changed, 349 insertions(+), 312 deletions(-)
diff --git a/tests/functional/docs/test_shared_example_config.py b/tests/functional/docs/test_shared_example_config.py
index c876957c..12f2bf4e 100644
--- a/tests/functional/docs/test_shared_example_config.py
+++ b/tests/functional/docs/test_shared_example_config.py
@@ -10,12 +10,14 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
+
import botocore.session
from botocore.model import OperationNotFoundError
from botocore.utils import parse_timestamp
-def test_lint_shared_example_configs():
+def _shared_example_configs():
session = botocore.session.Session()
loader = session.get_component('data_loader')
services = loader.list_available_services('examples-1')
@@ -27,10 +29,14 @@ def test_lint_shared_example_configs():
examples = example_config.get("examples", {})
for operation, operation_examples in examples.items():
for example in operation_examples:
- _lint_single_example(operation, example, service_model)
+ yield operation, example, service_model
-def _lint_single_example(operation_name, example_config, service_model):
+@pytest.mark.parametrize(
+ 'operation_name, example_config, service_model',
+ _shared_example_configs(),
+)
+def test_lint_single_example(operation_name, example_config, service_model):
# The operation should actually exist
assert_operation_exists(service_model, operation_name)
operation_model = service_model.operation_model(operation_name)
diff --git a/tests/functional/test_alias.py b/tests/functional/test_alias.py
index c80e1c78..1a00031f 100644
--- a/tests/functional/test_alias.py
+++ b/tests/functional/test_alias.py
@@ -10,6 +10,9 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import itertools
+import pytest
+
import botocore.session
from botocore.stub import Stubber
from botocore.exceptions import ParamValidationError
@@ -46,16 +49,13 @@ ALIAS_CASES = [
]
-def test_can_use_alias():
- session = botocore.session.get_session()
- for case in ALIAS_CASES:
- _can_use_parameter_in_client_call(session, case)
-
-
-def test_can_use_original_name():
+@pytest.mark.parametrize(
+ 'case, use_alias',
+ itertools.product(ALIAS_CASES, [True, False]),
+)
+def test_aliases(case, use_alias):
session = botocore.session.get_session()
- for case in ALIAS_CASES:
- _can_use_parameter_in_client_call(session, case, False)
+ _can_use_parameter_in_client_call(session, case, use_alias=use_alias)
def _can_use_parameter_in_client_call(session, case, use_alias=True):
diff --git a/tests/functional/test_client_class_names.py b/tests/functional/test_client_class_names.py
index e6a393f7..20c4c9a1 100644
--- a/tests/functional/test_client_class_names.py
+++ b/tests/functional/test_client_class_names.py
@@ -10,11 +10,13 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from tests import unittest
+
import botocore.session
-REGION = 'us-east-1'
+REGION = 'us-east-1'
SERVICE_TO_CLASS_NAME = {
'autoscaling': 'AutoScaling',
'cloudformation': 'CloudFormation',
@@ -67,9 +69,13 @@ SERVICE_TO_CLASS_NAME = {
}
-class TestClientClassNames(unittest.TestCase):
- def test_client_has_correct_class_name(self):
- session = botocore.session.get_session()
- for service_name in SERVICE_TO_CLASS_NAME:
- client = session.create_client(service_name, REGION)
- assert client.__class__.__name__ == SERVICE_TO_CLASS_NAME[service_name]
+def _client_name_cases():
+ session = botocore.session.get_session()
+ for service_name, client_name in SERVICE_TO_CLASS_NAME.items():
+ client = session.create_client(service_name, REGION)
+ yield client.__class__.__name__, client_name
+
+
+@pytest.mark.parametrize("client_cls_name, client_name", _client_name_cases())
+def test_client_has_correct_class_name(client_cls_name, client_name):
+ assert client_cls_name == client_name
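Note that the generator above creates the session and clients while pytest is collecting tests, so every yielded pair is computed before any test runs. One possible refinement (not part of this patch) is to wrap each case in pytest.param with an explicit id so a failing service is easy to spot in the report; the mapping below is a shortened illustration:

    import pytest

    CASES = {'autoscaling': 'AutoScaling', 'cloudformation': 'CloudFormation'}

    def _client_name_cases():
        for service_name, expected in CASES.items():
            # In the patched test, session.create_client() runs here,
            # i.e. during collection; keep that work cheap.
            yield pytest.param(service_name, expected, id=service_name)

    @pytest.mark.parametrize('service_name, expected', _client_name_cases())
    def test_service_maps_to_class_name(service_name, expected):
        assert CASES[service_name] == expected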
diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py
index 113ec54f..19485116 100644
--- a/tests/functional/test_endpoints.py
+++ b/tests/functional/test_endpoints.py
@@ -10,6 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
+import pytest
from botocore.session import get_session
@@ -96,7 +97,7 @@ NOT_SUPPORTED_IN_SDK = [
]
-def test_endpoint_matches_service():
+def _known_endpoint_prefix_cases():
# This verifies client names match up with data from the endpoints.json
# file. We want to verify that every entry in the endpoints.json
# file corresponds to a client we can construct via
@@ -137,14 +138,18 @@ def test_endpoint_matches_service():
# prefix.
endpoint_prefix = ENDPOINT_PREFIX_OVERRIDE.get(endpoint_prefix,
endpoint_prefix)
- _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes)
+ yield endpoint_prefix, known_endpoint_prefixes
-def _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes):
+@pytest.mark.parametrize(
+ 'endpoint_prefix, known_endpoint_prefixes',
+ _known_endpoint_prefix_cases(),
+)
+def test_endpoint_prefix_known(endpoint_prefix, known_endpoint_prefixes):
assert endpoint_prefix in known_endpoint_prefixes
-def test_service_name_matches_endpoint_prefix():
+def _service_name_matches_endpoint_prefix_cases():
# Generates tests for each service to verify that the computed service
# named based on the service id matches the service name used to
# create a client (i.e the directory name in botocore/data)
@@ -157,10 +162,14 @@ def test_service_name_matches_endpoint_prefix():
services = loader.list_available_services('service-2')
for service in services:
- _assert_service_name_matches_endpoint_prefix(session, service)
+ yield session, service
-def _assert_service_name_matches_endpoint_prefix(session, service_name):
+@pytest.mark.parametrize(
+ "session, service_name",
+ _service_name_matches_endpoint_prefix_cases(),
+)
+def test_service_name_matches_endpoint_prefix(session, service_name):
service_model = session.get_service_model(service_name)
computed_name = service_model.service_id.replace(' ', '-').lower()
diff --git a/tests/functional/test_model_completeness.py b/tests/functional/test_model_completeness.py
index 365db9bc..af08632a 100644
--- a/tests/functional/test_model_completeness.py
+++ b/tests/functional/test_model_completeness.py
@@ -16,29 +16,35 @@ from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
-def _test_model_is_not_lost(service_name, type_name,
- previous_version, latest_version):
+def _paginators_and_waiters():
+ for service_name in Session().get_available_services():
+ versions = Loader().list_api_versions(service_name, 'service-2')
+ if len(versions) > 1:
+ for type_name in ['paginators-1', 'waiters-2']:
+ yield service_name, type_name, versions[-2], versions[-1]
+
+
+@pytest.mark.parametrize(
+ 'service_name, type_name, previous_version, latest_version',
+ _paginators_and_waiters(),
+)
+def test_paginators_and_waiters_are_not_lost_in_new_version(
+ service_name, type_name, previous_version, latest_version,
+):
# Make sure if a paginator and/or waiter exists in previous version,
# there will be a successor existing in latest version.
loader = Loader()
try:
previous = loader.load_service_model(
- service_name, type_name, previous_version)
+ service_name, type_name, previous_version
+ )
except DataNotFoundError:
pass
else:
try:
latest = loader.load_service_model(
- service_name, type_name, latest_version)
+ service_name, type_name, latest_version
+ )
except DataNotFoundError as e:
- raise AssertionError(
- "%s must exist for %s: %s" % (type_name, service_name, e))
-
-@pytest.mark.parametrize('service_name', Session().get_available_services())
-def test_paginators_and_waiters_are_not_lost_in_new_version(service_name):
- versions = Loader().list_api_versions(service_name, 'service-2')
- if len(versions) > 1:
- for type_name in ['paginators-1', 'waiters-2']:
- _test_model_is_not_lost(service_name,
- type_name,
- versions[-2], versions[-1])
+ fmt = (type_name, service_name, e)
+ raise AssertionError("%s must exist for %s: %s" % fmt)
diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py
index 3b84e014..28f4af74 100644
--- a/tests/functional/test_paginator_config.py
+++ b/tests/functional/test_paginator_config.py
@@ -131,7 +131,7 @@ KNOWN_EXTRA_OUTPUT_KEYS = [
]
-def _generate_page_configs():
+def _pagination_configs():
session = botocore.session.get_session()
loader = session.get_component('data_loader')
services = loader.list_available_services('paginators-1')
@@ -140,17 +140,15 @@ def _generate_page_configs():
page_config = loader.load_service_model(service_name,
'paginators-1',
service_model.api_version)
- yield (service_model, page_config['pagination'])
+ for op_name, single_config in page_config['pagination'].items():
+ yield op_name, single_config, service_model
-@pytest.mark.parametrize('service_model, pagination', _generate_page_configs())
-def test_lint_pagination_configs(service_model, pagination):
- for op_name, single_config in pagination.items():
- _lint_single_paginator(op_name, single_config, service_model)
-
-
-def _lint_single_paginator(operation_name, page_config,
- service_model):
+@pytest.mark.parametrize(
+ 'operation_name, page_config, service_model',
+ _pagination_configs(),
+)
+def test_lint_single_paginator(operation_name, page_config, service_model):
_validate_known_pagination_keys(page_config)
_valiate_result_key_exists(page_config)
_validate_referenced_operation_exists(operation_name, service_model)
diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py
index 9219789f..322166bb 100644
--- a/tests/functional/test_public_apis.py
+++ b/tests/functional/test_public_apis.py
@@ -42,11 +42,27 @@ PUBLIC_API_TESTS = {
}
+def _generate_public_api_tests():
+ session = Session()
+ # Mimic the scenario that user does not have aws credentials setup
+ session.get_credentials = mock.Mock(return_value=None)
+ for service_name in PUBLIC_API_TESTS:
+ client = session.create_client(service_name, REGIONS[service_name])
+ for operation_name in PUBLIC_API_TESTS[service_name]:
+ kwargs = PUBLIC_API_TESTS[service_name][operation_name]
+ operation = getattr(client, xform_name(operation_name))
+ yield client, operation, kwargs
+
+
class EarlyExit(Exception):
pass
-def _test_public_apis_will_not_be_signed(client, operation, kwargs):
+@pytest.mark.parametrize(
+ 'client, operation, kwargs',
+ _generate_public_api_tests(),
+)
+def test_public_apis_will_not_be_signed(client, operation, kwargs):
with ClientHTTPStubber(client) as http_stubber:
http_stubber.responses.append(EarlyExit())
try:
@@ -60,16 +76,3 @@ def _test_public_apis_will_not_be_signed(client, operation, kwargs):
assert sig_v3_disabled, "SigV3 is incorrectly enabled"
sig_v4_disabled = 'Authorization' not in request.headers
assert sig_v4_disabled, "SigV4 is incorrectly enabled"
-
-
-@pytest.mark.parametrize('service_name, operations', PUBLIC_API_TESTS.items())
-def test_public_apis_will_not_be_signed(service_name, operations):
- session = Session()
-
- # Mimic the scenario that user does not have aws credentials setup
- session.get_credentials = mock.Mock(return_value=None)
- client = session.create_client(service_name, REGIONS[service_name])
- for operation_name in operations:
- kwargs = operations[operation_name]
- method = getattr(client, xform_name(operation_name))
- _test_public_apis_will_not_be_signed(client, method, kwargs)
diff --git a/tests/functional/test_regions.py b/tests/functional/test_regions.py
index 6ac1335a..f15a00b0 100644
--- a/tests/functional/test_regions.py
+++ b/tests/functional/test_regions.py
@@ -465,19 +465,23 @@ def test_all_s3_endpoints_have_s3v4():
assert 's3v4' in resolved['signatureVersions']
assert 'v4' not in resolved['signatureVersions']
-def _test_single_service_partition_endpoint(service_name,
- expected_endpoint,
- resolver):
+
+def _known_endpoints():
+ session = _get_patched_session()
+ resolver = session._get_internal_component('endpoint_resolver')
+ for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
+ yield service_name, endpoint, resolver
+
+
+@pytest.mark.parametrize(
+ 'service_name, endpoint, resolver',
+ _known_endpoints(),
+)
+def test_single_service_partition_endpoint(service_name, endpoint, resolver):
bridge = ClientEndpointBridge(resolver)
result = bridge.resolve(service_name)
- assert result['endpoint_url'] == expected_endpoint
+ assert result['endpoint_url'] == endpoint
-def test_known_endpoints_other():
- resolver = _get_patched_session()._get_internal_component(
- 'endpoint_resolver')
- for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items():
- _test_single_service_partition_endpoint(service_name,
- endpoint, resolver)
def test_non_partition_endpoint_requires_region():
resolver = _get_patched_session()._get_internal_component(
diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py
index 19adad48..0ccd5421 100644
--- a/tests/functional/test_s3.py
+++ b/tests/functional/test_s3.py
@@ -1231,51 +1231,50 @@ class TestGeneratePresigned(BaseS3OperationTest):
self.assert_is_v2_presigned_url(url)
-def test_checksums_included_in_expected_operations():
- """Validate expected calls include Content-MD5 header"""
-
- t = S3ChecksumCases(_verify_checksum_in_headers)
- t.case('put_bucket_tagging',
- {"Bucket": "foo", "Tagging": {"TagSet": []}})
- t.case('put_bucket_lifecycle',
- {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}})
- t.case('put_bucket_lifecycle_configuration',
- {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}})
- t.case('put_bucket_cors',
- {"Bucket": "foo", "CORSConfiguration": {"CORSRules": []}})
- t.case('delete_objects',
- {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}})
- t.case('put_bucket_replication',
- {"Bucket": "foo",
- "ReplicationConfiguration": {"Role": "", "Rules": []}})
- t.case('put_bucket_acl',
- {"Bucket": "foo", "AccessControlPolicy": {}})
- t.case('put_bucket_logging',
- {"Bucket": "foo",
- "BucketLoggingStatus": {}})
- t.case('put_bucket_notification',
- {"Bucket": "foo", "NotificationConfiguration": {}})
- t.case('put_bucket_policy',
- {"Bucket": "foo", "Policy": "<bucket-policy>"})
- t.case('put_bucket_request_payment',
- {"Bucket": "foo", "RequestPaymentConfiguration": {"Payer": ""}})
- t.case('put_bucket_versioning',
- {"Bucket": "foo", "VersioningConfiguration": {}})
- t.case('put_bucket_website',
- {"Bucket": "foo",
- "WebsiteConfiguration": {}})
- t.case('put_object_acl',
- {"Bucket": "foo", "Key": "bar", "AccessControlPolicy": {}})
- t.case('put_object_legal_hold',
- {"Bucket": "foo", "Key": "bar", "LegalHold": {"Status": "ON"}})
- t.case('put_object_retention',
- {"Bucket": "foo", "Key": "bar",
- "Retention": {"RetainUntilDate": "2020-11-05"}})
- t.case('put_object_lock_configuration',
- {"Bucket": "foo", "ObjectLockConfiguration": {}})
-
-
-def _verify_checksum_in_headers(operation, operation_kwargs):
+CHECKSUM_CASES = [
+ ('put_bucket_tagging',
+ {"Bucket": "foo", "Tagging": {"TagSet": []}}),
+ ('put_bucket_lifecycle',
+ {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}}),
+ ('put_bucket_lifecycle_configuration',
+ {"Bucket": "foo", "LifecycleConfiguration": {"Rules": []}}),
+ ('put_bucket_cors',
+ {"Bucket": "foo", "CORSConfiguration": {"CORSRules": []}}),
+ ('delete_objects',
+ {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}}),
+ ('put_bucket_replication',
+ {"Bucket": "foo",
+ "ReplicationConfiguration": {"Role": "", "Rules": []}}),
+ ('put_bucket_acl',
+ {"Bucket": "foo", "AccessControlPolicy": {}}),
+ ('put_bucket_logging',
+ {"Bucket": "foo",
+ "BucketLoggingStatus": {}}),
+ ('put_bucket_notification',
+ {"Bucket": "foo", "NotificationConfiguration": {}}),
+ ('put_bucket_policy',
+ {"Bucket": "foo", "Policy": "<bucket-policy>"}),
+ ('put_bucket_request_payment',
+ {"Bucket": "foo", "RequestPaymentConfiguration": {"Payer": ""}}),
+ ('put_bucket_versioning',
+ {"Bucket": "foo", "VersioningConfiguration": {}}),
+ ('put_bucket_website',
+ {"Bucket": "foo",
+ "WebsiteConfiguration": {}}),
+ ('put_object_acl',
+ {"Bucket": "foo", "Key": "bar", "AccessControlPolicy": {}}),
+ ('put_object_legal_hold',
+ {"Bucket": "foo", "Key": "bar", "LegalHold": {"Status": "ON"}}),
+ ('put_object_retention',
+ {"Bucket": "foo", "Key": "bar",
+ "Retention": {"RetainUntilDate": "2020-11-05"}}),
+ ('put_object_lock_configuration',
+ {"Bucket": "foo", "ObjectLockConfiguration": {}}),
+]
+
+
+@pytest.mark.parametrize('operation, operation_kwargs', CHECKSUM_CASES)
+def test_checksum_in_headers(operation, operation_kwargs):
environ = {}
with mock.patch('os.environ', environ):
environ['AWS_ACCESS_KEY_ID'] = 'access_key'
@@ -1291,42 +1290,41 @@ def _verify_checksum_in_headers(operation, operation_kwargs):
assert 'Content-MD5' in stub.requests[-1].headers
-def test_correct_url_used_for_s3():
+def _endpoint_urls():
# Test that given various sets of config options and bucket names,
# we construct the expect endpoint url.
- t = S3AddressingCases(_verify_expected_endpoint_url)
# The default behavior for sigv2. DNS compatible buckets
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-west-1', bucket='bucket', key='key',
+ yield dict(region='us-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.us-west-1.amazonaws.com/key')
- t.case(region='us-west-1', bucket='bucket', key='key',
+ yield dict(region='us-west-1', bucket='bucket', key='key',
signature_version='s3', is_secure=False,
expected_url='http://bucket.s3.us-west-1.amazonaws.com/key')
# Virtual host addressing is independent of signature version.
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://bucket.s3.us-west-2.amazonaws.com/key'))
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-west-1', bucket='bucket', key='key',
+ yield dict(region='us-west-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://bucket.s3.us-west-1.amazonaws.com/key'))
- t.case(region='us-west-1', bucket='bucket', key='key',
+ yield dict(region='us-west-1', bucket='bucket', key='key',
signature_version='s3v4', is_secure=False,
expected_url=(
'http://bucket.s3.us-west-1.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-west-1', bucket='bucket-with-num-1', key='key',
signature_version='s3v4', is_secure=False,
expected_url='http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key')
@@ -1334,188 +1332,188 @@ def test_correct_url_used_for_s3():
# Regions outside of the 'aws' partition.
# These should still default to virtual hosted addressing
# unless explicitly configured otherwise.
- t.case(region='cn-north-1', bucket='bucket', key='key',
+ yield dict(region='cn-north-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
# This isn't actually supported because cn-north-1 is sigv4 only,
# but we'll still double check that our internal logic is correct
# when building the expected url.
- t.case(region='cn-north-1', bucket='bucket', key='key',
+ yield dict(region='cn-north-1', bucket='bucket', key='key',
signature_version='s3',
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
# If the request is unsigned, we should have the default
# fix_s3_host behavior which is to use virtual hosting where
# possible but fall back to path style when needed.
- t.case(region='cn-north-1', bucket='bucket', key='key',
+ yield dict(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
- t.case(region='cn-north-1', bucket='bucket.dot', key='key',
+ yield dict(region='cn-north-1', bucket='bucket.dot', key='key',
signature_version=UNSIGNED,
expected_url=(
'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key'))
# And of course you can explicitly specify which style to use.
virtual_hosting = {'addressing_style': 'virtual'}
- t.case(region='cn-north-1', bucket='bucket', key='key',
+ yield dict(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
s3_config=virtual_hosting,
expected_url=(
'https://bucket.s3.cn-north-1.amazonaws.com.cn/key'))
path_style = {'addressing_style': 'path'}
- t.case(region='cn-north-1', bucket='bucket', key='key',
+ yield dict(region='cn-north-1', bucket='bucket', key='key',
signature_version=UNSIGNED,
s3_config=path_style,
expected_url=(
'https://s3.cn-north-1.amazonaws.com.cn/bucket/key'))
# If you don't have a DNS compatible bucket, we use path style.
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket.dot', key='key',
expected_url='https://s3.us-west-2.amazonaws.com/bucket.dot/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket.dot', key='key',
expected_url='https://s3.amazonaws.com/bucket.dot/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='BucketName', key='key',
expected_url='https://s3.amazonaws.com/BucketName/key')
- t.case(
+ yield dict(
region='us-west-1', bucket='bucket_name', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/bucket_name/key')
- t.case(
+ yield dict(
region='us-west-1', bucket='-bucket-name', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/-bucket-name/key')
- t.case(
+ yield dict(
region='us-west-1', bucket='bucket-name-', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/bucket-name-/key')
- t.case(
+ yield dict(
region='us-west-1', bucket='aa', key='key',
expected_url='https://s3.us-west-1.amazonaws.com/aa/key')
- t.case(
+ yield dict(
region='us-west-1', bucket='a'*64, key='key',
expected_url=('https://s3.us-west-1.amazonaws.com/%s/key' % ('a' * 64))
)
# Custom endpoint url should always be used.
- t.case(
+ yield dict(
customer_provided_endpoint='https://my-custom-s3/',
bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
- t.case(
+ yield dict(
customer_provided_endpoint='https://my-custom-s3/',
bucket='bucket.dots', key='bar',
expected_url='https://my-custom-s3/bucket.dots/bar')
# Doesn't matter what region you specify, a custom endpoint url always
# wins.
- t.case(
+ yield dict(
customer_provided_endpoint='https://my-custom-s3/',
region='us-west-2', bucket='foo', key='bar',
expected_url='https://my-custom-s3/foo/bar')
# Explicitly configuring "virtual" addressing_style.
virtual_hosting = {'addressing_style': 'virtual'}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')
- t.case(
+ yield dict(
region='eu-central-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.eu-central-1.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
customer_provided_endpoint='https://foo.amazonaws.com',
expected_url='https://bucket.foo.amazonaws.com/key')
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.unknown.amazonaws.com/key')
# Test us-gov with virtual addressing.
- t.case(
+ yield dict(
region='us-gov-west-1', bucket='bucket', key='key',
s3_config=virtual_hosting,
expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key')
- t.case(
+ yield dict(
region='fips-us-gov-west-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3-fips.us-gov-west-1.amazonaws.com/key')
# Test path style addressing.
path_style = {'addressing_style': 'path'}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
expected_url='https://s3.amazonaws.com/bucket/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=path_style,
customer_provided_endpoint='https://foo.amazonaws.com/',
expected_url='https://foo.amazonaws.com/bucket/key')
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=path_style,
expected_url='https://s3.unknown.amazonaws.com/bucket/key')
# S3 accelerate
use_accelerate = {'use_accelerate_endpoint': True}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
- t.case(
+ yield dict(
# region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# Provided endpoints still get recognized as accelerate endpoints.
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
customer_provided_endpoint='http://s3-accelerate.amazonaws.com',
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate, is_secure=False,
# Note we're using http:// because is_secure=False.
expected_url='http://bucket.s3-accelerate.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# s3-accelerate must be the first part of the url.
customer_provided_endpoint='https://foo.s3-accelerate.amazonaws.com',
expected_url='https://foo.s3-accelerate.amazonaws.com/bucket/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# The endpoint must be an Amazon endpoint.
customer_provided_endpoint='https://s3-accelerate.notamazon.com',
expected_url='https://s3-accelerate.notamazon.com/bucket/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=use_accelerate,
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config={'use_accelerate_endpoint': True,
'addressing_style': 'path'},
@@ -1523,17 +1521,17 @@ def test_correct_url_used_for_s3():
# S3 dual stack endpoints.
use_dualstack = {'use_dualstack_endpoint': True}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- t.case(
+ yield dict(
region=None, bucket='bucket', key='key',
s3_config=use_dualstack,
# Uses us-east-1 for no region set.
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- t.case(
+ yield dict(
region='aws-global', bucket='bucket', key='key',
s3_config=use_dualstack,
# Pseudo-regions should not have any special resolving logic even when
@@ -1542,32 +1540,32 @@ def test_correct_url_used_for_s3():
# region name.
expected_url=(
'https://bucket.s3.dualstack.aws-global.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3',
# Still default to virtual hosted when possible on sigv2.
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=use_dualstack, signature_version='s3v4',
expected_url='https://bucket.s3.dualstack.unknown.amazonaws.com/key')
# Non DNS compatible buckets use path style for dual stack.
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket.dot', key='key',
s3_config=use_dualstack,
# Still default to virtual hosted when possible.
expected_url=(
'https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key'))
# Supports is_secure (use_ssl=False in create_client()).
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket.dot', key='key', is_secure=False,
s3_config=use_dualstack,
# Still default to virtual hosted when possible.
@@ -1580,7 +1578,7 @@ def test_correct_url_used_for_s3():
'use_dualstack_endpoint': True,
'addressing_style': 'path',
}
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
s3_config=force_path_style,
# Still default to virtual hosted when possible.
@@ -1591,32 +1589,32 @@ def test_correct_url_used_for_s3():
'use_accelerate_endpoint': True,
'use_dualstack_endpoint': True,
}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
- t.case(
+ yield dict(
# Region is ignored with S3 accelerate.
region='us-west-2', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
# Only s3-accelerate overrides a customer endpoint.
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_dualstack,
customer_provided_endpoint='https://s3-accelerate.amazonaws.com',
expected_url=(
'https://bucket.s3-accelerate.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# Dualstack is whitelisted.
customer_provided_endpoint=(
'https://s3-accelerate.dualstack.amazonaws.com'),
expected_url=(
'https://bucket.s3-accelerate.dualstack.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# Even whitelisted parts cannot be duplicated.
customer_provided_endpoint=(
@@ -1624,7 +1622,7 @@ def test_correct_url_used_for_s3():
expected_url=(
'https://s3-accelerate.dualstack.dualstack'
'.amazonaws.com/bucket/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# More than two extra parts is not allowed.
customer_provided_endpoint=(
@@ -1633,12 +1631,12 @@ def test_correct_url_used_for_s3():
expected_url=(
'https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com'
'/bucket/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
# Extra components must be whitelisted.
customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com',
expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key')
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack, is_secure=False,
# Note we're using http:// because is_secure=False.
@@ -1647,7 +1645,7 @@ def test_correct_url_used_for_s3():
# Use virtual even if path is specified for s3 accelerate because
# path style will not work with S3 accelerate.
use_accelerate_dualstack['addressing_style'] = 'path'
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=use_accelerate_dualstack,
expected_url=(
@@ -1657,14 +1655,14 @@ def test_correct_url_used_for_s3():
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint'
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': True},
expected_url=(
@@ -1672,21 +1670,21 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='myendpoint/key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/myendpoint/key'
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='foo/myendpoint/key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/foo/myendpoint/key'
)
)
- t.case(
+ yield dict(
# Note: The access-point arn has us-west-2 and the client's region is
# us-east-1, for the default case the access-point arn region is used.
region='us-east-1', bucket=accesspoint_arn, key='key',
@@ -1695,7 +1693,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1703,14 +1701,14 @@ def test_correct_url_used_for_s3():
'us-east-1.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='s3-external-1', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='s3-external-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1718,14 +1716,14 @@ def test_correct_url_used_for_s3():
's3-external-1.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='aws-global', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='aws-global', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1733,7 +1731,7 @@ def test_correct_url_used_for_s3():
'aws-global.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='unknown', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1741,7 +1739,7 @@ def test_correct_url_used_for_s3():
'unknown.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='unknown', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': True},
expected_url=(
@@ -1752,21 +1750,21 @@ def test_correct_url_used_for_s3():
accesspoint_arn_cn = (
'arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint'
)
- t.case(
+ yield dict(
region='cn-north-1', bucket=accesspoint_arn_cn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'cn-north-1.amazonaws.com.cn/key'
)
)
- t.case(
+ yield dict(
region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'cn-north-1.amazonaws.com.cn/key'
)
)
- t.case(
+ yield dict(
region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1777,21 +1775,21 @@ def test_correct_url_used_for_s3():
accesspoint_arn_gov = (
'arn:aws-us-gov:s3:us-gov-east-1:123456789012:accesspoint:myendpoint'
)
- t.case(
+ yield dict(
region='us-gov-east-1', bucket=accesspoint_arn_gov, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-gov-east-1.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-gov-east-1.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -1800,7 +1798,7 @@ def test_correct_url_used_for_s3():
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key', is_secure=False,
expected_url=(
'http://myendpoint-123456789012.s3-accesspoint.'
@@ -1808,7 +1806,7 @@ def test_correct_url_used_for_s3():
)
)
# Dual-stack with access-point arn
- t.case(
+ yield dict(
# Note: The access-point arn has us-west-2 and the client's region is
# us-east-1, for the default case the access-point arn region is used.
region='us-east-1', bucket=accesspoint_arn, key='key',
@@ -1820,7 +1818,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={
'use_dualstack_endpoint': True,
@@ -1831,7 +1829,7 @@ def test_correct_url_used_for_s3():
'us-east-1.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-gov-east-1', bucket=accesspoint_arn_gov, key='key',
s3_config={
'use_dualstack_endpoint': True,
@@ -1844,7 +1842,7 @@ def test_correct_url_used_for_s3():
# None of the various s3 settings related to paths should affect what
# endpoint to use when an access-point is provided.
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'auto'},
expected_url=(
@@ -1852,7 +1850,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'virtual'},
expected_url=(
@@ -1860,7 +1858,7 @@ def test_correct_url_used_for_s3():
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
s3_config={'adressing_style': 'path'},
expected_url=(
@@ -1873,27 +1871,27 @@ def test_correct_url_used_for_s3():
us_east_1_regional_endpoint = {
'us_east_1_regional_endpoint': 'regional'
}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.us-east-1.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.us-west-2.amazonaws.com/key'))
- t.case(
+ yield dict(
region=None, bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint,
expected_url=(
'https://bucket.s3.unknown.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1901,7 +1899,7 @@ def test_correct_url_used_for_s3():
},
expected_url=(
'https://bucket.s3.dualstack.us-east-1.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1909,7 +1907,7 @@ def test_correct_url_used_for_s3():
},
expected_url=(
'https://bucket.s3-accelerate.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config={
'us_east_1_regional_endpoint': 'regional',
@@ -1923,52 +1921,35 @@ def test_correct_url_used_for_s3():
us_east_1_regional_endpoint_legacy = {
'us_east_1_regional_endpoint': 'legacy'
}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- t.case(
+ yield dict(
region=None, bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
'https://bucket.s3.amazonaws.com/key'))
- t.case(
+ yield dict(
region='unknown', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint_legacy,
expected_url=(
'https://bucket.s3.unknown.amazonaws.com/key'))
-class BaseTestCase:
- def __init__(self, verify_function):
- self._verify = verify_function
+@pytest.mark.parametrize("kwargs", _endpoint_urls())
+def test_expected_endpoint_url(kwargs):
+ _verify_expected_endpoint_url(**kwargs)
- def case(self, **kwargs):
- return self._verify, kwargs
-
-class S3AddressingCases(BaseTestCase):
- def case(self, region=None, bucket='bucket', key='key',
- s3_config=None, is_secure=True, customer_provided_endpoint=None,
- expected_url=None, signature_version=None):
- return (
- self._verify, region, bucket, key, s3_config, is_secure,
- customer_provided_endpoint, expected_url, signature_version
- )
-
-
-class S3ChecksumCases(BaseTestCase):
- def case(self, operation, operation_args):
- return self._verify, operation, operation_args
-
-
-def _verify_expected_endpoint_url(region, bucket, key, s3_config,
- is_secure=True,
- customer_provided_endpoint=None,
- expected_url=None, signature_version=None):
+def _verify_expected_endpoint_url(
+ region=None, bucket='bucket', key='key',
+ s3_config=None, is_secure=True, customer_provided_endpoint=None,
+ expected_url=None, signature_version=None,
+):
environ = {}
with mock.patch('os.environ', environ):
environ['AWS_ACCESS_KEY_ID'] = 'access_key'
@@ -2010,90 +1991,89 @@ def _create_s3_client(region, is_secure, endpoint_url, s3_config,
return s3
-def test_addressing_for_presigned_urls():
+def _presigned_urls():
# See TestGeneratePresigned for more detailed test cases
# on presigned URLs. Here we're just focusing on the
# addressing mode used for presigned URLs.
# We special case presigned URLs due to backwards
# compatibility.
- t = S3AddressingCases(_verify_presigned_url_addressing)
# us-east-1, or the "global" endpoint. A signature version of
# None means the user doesn't have signature version configured.
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version=None,
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version='s3v4',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-1', bucket='bucket', key='key',
+ yield dict(region='us-east-1', bucket='bucket', key='key',
signature_version='s3v4',
s3_config={'addressing_style': 'path'},
expected_url='https://s3.amazonaws.com/bucket/key')
# A region that supports both 's3' and 's3v4'.
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version=None,
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
s3_config={'addressing_style': 'path'},
expected_url='https://s3.us-west-2.amazonaws.com/bucket/key')
# An 's3v4' only region.
- t.case(region='us-east-2', bucket='bucket', key='key',
+ yield dict(region='us-east-2', bucket='bucket', key='key',
signature_version=None,
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-2', bucket='bucket', key='key',
+ yield dict(region='us-east-2', bucket='bucket', key='key',
signature_version='s3',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-2', bucket='bucket', key='key',
+ yield dict(region='us-east-2', bucket='bucket', key='key',
signature_version='s3v4',
expected_url='https://bucket.s3.amazonaws.com/key')
- t.case(region='us-east-2', bucket='bucket', key='key',
+ yield dict(region='us-east-2', bucket='bucket', key='key',
signature_version='s3v4',
s3_config={'addressing_style': 'path'},
expected_url='https://s3.us-east-2.amazonaws.com/bucket/key')
# Dualstack endpoints
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
signature_version=None,
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
signature_version='s3',
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
- t.case(
+ yield dict(
region='us-west-2', bucket='bucket', key='key',
signature_version='s3v4',
s3_config={'use_dualstack_endpoint': True},
expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key')
# Accelerate
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version=None,
s3_config={'use_accelerate_endpoint': True},
expected_url='https://bucket.s3-accelerate.amazonaws.com/key')
# A region that we don't know about.
- t.case(region='us-west-50', bucket='bucket', key='key',
+ yield dict(region='us-west-50', bucket='bucket', key='key',
signature_version=None,
expected_url='https://bucket.s3.amazonaws.com/key')
# Customer provided URL results in us leaving the host untouched.
- t.case(region='us-west-2', bucket='bucket', key='key',
+ yield dict(region='us-west-2', bucket='bucket', key='key',
signature_version=None,
customer_provided_endpoint='https://foo.com/',
expected_url='https://foo.com/bucket/key')
@@ -2102,14 +2082,14 @@ def test_addressing_for_presigned_urls():
accesspoint_arn = (
'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint'
)
- t.case(
+ yield dict(
region='us-west-2', bucket=accesspoint_arn, key='key',
expected_url=(
'https://myendpoint-123456789012.s3-accesspoint.'
'us-west-2.amazonaws.com/key'
)
)
- t.case(
+ yield dict(
region='us-east-1', bucket=accesspoint_arn, key='key',
s3_config={'use_arn_region': False},
expected_url=(
@@ -2122,23 +2102,28 @@ def test_addressing_for_presigned_urls():
us_east_1_regional_endpoint = {
'us_east_1_regional_endpoint': 'regional'
}
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint, signature_version='s3',
expected_url=(
'https://bucket.s3.us-east-1.amazonaws.com/key'))
- t.case(
+ yield dict(
region='us-east-1', bucket='bucket', key='key',
s3_config=us_east_1_regional_endpoint, signature_version='s3v4',
expected_url=(
'https://bucket.s3.us-east-1.amazonaws.com/key'))
-def _verify_presigned_url_addressing(region, bucket, key, s3_config,
- is_secure=True,
- customer_provided_endpoint=None,
- expected_url=None,
- signature_version=None):
+@pytest.mark.parametrize("kwargs", _presigned_urls())
+def test_presigned_url_addressing(kwargs):
+ _verify_presigned_url_addressing(**kwargs)
+
+
+def _verify_presigned_url_addressing(
+ region=None, bucket='bucket', key='key',
+ s3_config=None, is_secure=True, customer_provided_endpoint=None,
+ expected_url=None, signature_version=None,
+):
s3 = _create_s3_client(region=region, is_secure=is_secure,
endpoint_url=customer_provided_endpoint,
s3_config=s3_config,
diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py
index 566a0c2d..b5e35a49 100644
--- a/tests/functional/test_s3_control_redirects.py
+++ b/tests/functional/test_s3_control_redirects.py
@@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
+import pytest
from contextlib import contextmanager
from tests import unittest, mock, BaseSessionTest, ClientHTTPStubber
@@ -324,26 +325,34 @@ def _bootstrap_test_case_client(session, test_case):
return _bootstrap_client(session, region, config=config)
-def test_accesspoint_arn_redirection():
+def _accesspoint_arn_redirection_cases():
session = _bootstrap_session()
for test_case in ACCESSPOINT_ARN_TEST_CASES:
client, stubber = _bootstrap_test_case_client(session, test_case)
- yield _test_accesspoint_arn, test_case, client, stubber
+ yield test_case, client, stubber
-def _test_accesspoint_arn(test_case, client, stubber):
+@pytest.mark.parametrize(
+ 'test_case, client, stubber',
+ _accesspoint_arn_redirection_cases(),
+)
+def test_accesspoint_arn(test_case, client, stubber):
with _assert_test_case(test_case, client, stubber):
client.get_access_point_policy(Name=test_case['arn'])
-def test_bucket_arn_redirection():
+def _bucket_arn_redirection_cases():
session = _bootstrap_session()
for test_case in BUCKET_ARN_TEST_CASES:
client, stubber = _bootstrap_test_case_client(session, test_case)
- yield _test_bucket_arn, test_case, client, stubber
+ yield test_case, client, stubber
-def _test_bucket_arn(test_case, client, stubber):
+@pytest.mark.parametrize(
+ 'test_case, client, stubber',
+ _bucket_arn_redirection_cases(),
+)
+def test_bucket_arn(test_case, client, stubber):
with _assert_test_case(test_case, client, stubber):
client.get_bucket(Bucket=test_case['arn'])
diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py
index 146a1f65..7ea71791 100644
--- a/tests/functional/test_six_imports.py
+++ b/tests/functional/test_six_imports.py
@@ -7,19 +7,20 @@ import pytest
ROOTDIR = os.path.dirname(botocore.__file__)
-@pytest.mark.parametrize("rootdir,dirnames,filenames", os.walk(ROOTDIR))
-def test_no_bare_six_imports(rootdir, dirnames, filenames):
- if 'vendored' in dirnames:
- # We don't need to lint our vendored packages.
- dirnames.remove('vendored')
- for filename in filenames:
- if not filename.endswith('.py'):
- continue
- fullname = os.path.join(rootdir, filename)
- _assert_no_bare_six_imports(fullname)
-
-
-def _assert_no_bare_six_imports(filename):
+def _all_py_files():
+ for rootdir, dirnames, filenames in os.walk(ROOTDIR):
+ if 'vendored' in dirnames:
+ # We don't need to lint our vendored packages.
+ dirnames.remove('vendored')
+ for filename in filenames:
+ if not filename.endswith('.py'):
+ continue
+ fullname = os.path.join(rootdir, filename)
+ yield fullname
+
+
+@pytest.mark.parametrize('filename', _all_py_files())
+def test_no_bare_six_imports(filename):
with open(filename) as f:
contents = f.read()
parsed = ast.parse(contents, filename)
diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py
index 01782007..0e4318bc 100644
--- a/tests/functional/test_waiter_config.py
+++ b/tests/functional/test_waiter_config.py
@@ -83,28 +83,38 @@ WAITER_SCHEMA = {
}
-@pytest.mark.parametrize("service_name", botocore.session.get_session().get_available_services())
-def test_lint_waiter_configs(service_name):
+def _all_waiter_models():
session = botocore.session.get_session()
+ for service_name in session.get_available_services():
+ try:
+ # We use the loader directly here because we need the entire
+ # json document, not just the portions exposed (either
+ # internally or externally) by the WaiterModel class.
+ loader = session.get_component('data_loader')
+ waiter_model = loader.load_service_model(service_name, 'waiters-2')
+ yield waiter_model
+ except UnknownServiceError:
+ # The service doesn't have waiters
+ continue
+
+
+@pytest.mark.parametrize("waiter_model", _all_waiter_models())
+def test_validate_waiter_schema(waiter_model):
validator = Draft4Validator(WAITER_SCHEMA)
- client = session.create_client(service_name, 'us-east-1')
- service_model = client.meta.service_model
- try:
- # We use the loader directly here because we need the entire
- # json document, not just the portions exposed (either
- # internally or externally) by the WaiterModel class.
- loader = session.get_component('data_loader')
- waiter_model = loader.load_service_model(
- service_name, 'waiters-2')
- except UnknownServiceError:
- # The service doesn't have waiters
- return
_validate_schema(validator, waiter_model)
- for waiter_name in client.waiter_names:
- _lint_single_waiter(client, waiter_name, service_model)
-def _lint_single_waiter(client, waiter_name, service_model):
+def _all_waiters():
+ session = botocore.session.get_session()
+ for service_name in session.get_available_services():
+ client = session.create_client(service_name, 'us-east-1')
+ service_model = client.meta.service_model
+ for waiter_name in client.waiter_names:
+ yield client, waiter_name, service_model
+
+
+@pytest.mark.parametrize("client, waiter_name, service_model", _all_waiters())
+def test_lint_waiter_configs(client, waiter_name, service_model):
try:
waiter = client.get_waiter(waiter_name)
# The 'acceptors' property is dynamic and will create
--
2.29.2
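
For reviewers of this packaging patch, here is a minimal, self-contained sketch (not taken from botocore itself) of the conversion pattern the patch applies throughout the test suite: the nose-style generator tests that used to yield a (verify_function, kwargs) pair become plain generators of case dicts, consumed by @pytest.mark.parametrize. All names below (_cases, _verify, test_expected_url) are illustrative only and do not appear in botocore's tests.

    import pytest


    def _cases():
        # Previously a nose generator test would do: yield _verify, kwargs.
        # Now we just yield the keyword arguments for each case.
        yield dict(region='us-east-1', bucket='bucket', key='key',
                   expected_url='https://bucket.s3.amazonaws.com/key')
        yield dict(region='us-west-2', bucket='bucket', key='key',
                   expected_url='https://bucket.s3.us-west-2.amazonaws.com/key')


    # pytest consumes the generator at collection time, producing one
    # independent test per yielded dict.
    @pytest.mark.parametrize('kwargs', _cases())
    def test_expected_url(kwargs):
        _verify(**kwargs)


    def _verify(region=None, bucket='bucket', key='key', expected_url=None):
        # Stand-in for the real verification helper; here it only checks
        # the shape of the expected URL to keep the sketch runnable.
        assert expected_url.startswith('https://')

The same shape is used for the waiter, six-import, and s3-control redirect tests above: the per-case setup moves into a private generator and the former nested verify function becomes the parametrized test body.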