diff --git a/python-adapt-parser.changes b/python-adapt-parser.changes
index 6c47584..d41ece6 100644
--- a/python-adapt-parser.changes
+++ b/python-adapt-parser.changes
@@ -1,3 +1,18 @@
+-------------------------------------------------------------------
+Wed Oct 19 11:27:15 UTC 2022 - Daniel Garcia
+
+- Fix sitelib in files section
+
+-------------------------------------------------------------------
+Wed Oct 19 10:59:31 UTC 2022 - Daniel Garcia
+
+- Add patch remove-python-six.patch to not require python-six
+- Remove python-six dependency
+- Remove unneeded deps:
+  * unittest-xml-reporting
+  * python-pyee
+- Use pytest in check instead of unittest
+
 -------------------------------------------------------------------
 Tue Nov  2 10:26:21 UTC 2021 - pgajdos@suse.com
 
diff --git a/python-adapt-parser.spec b/python-adapt-parser.spec
index 199c87d..1a692f0 100644
--- a/python-adapt-parser.spec
+++ b/python-adapt-parser.spec
@@ -1,7 +1,7 @@
 #
 # spec file for package python-adapt-parser
 #
-# Copyright (c) 2021 SUSE LLC
+# Copyright (c) 2022 SUSE LLC
 #
 # All modifications and additions to the file contributed by third parties
 # remain the property of their copyright owners, unless otherwise agreed
@@ -25,14 +25,12 @@ License:        Apache-2.0
 Group:          Development/Languages/Python
 URL:            https://github.com/MycroftAI/adapt
 Source:         https://github.com/MycroftAI/adapt/archive/refs/tags/release/v%{version}.tar.gz
-BuildRequires:  %{python_module pyee >= 8.1.0}
+# PATCH-FIX-OPENSUSE remove-python-six.patch
+Patch:          remove-python-six.patch
+BuildRequires:  %{python_module pytest}
 BuildRequires:  %{python_module setuptools}
-BuildRequires:  %{python_module six >= 1.10.0}
-BuildRequires:  %{python_module unittest-xml-reporting}
 BuildRequires:  fdupes
 BuildRequires:  python-rpm-macros
-Requires:       python-pyee >= 8.1.0
-Requires:       python-six >= 1.10.0
 BuildArch:      noarch
 %python_subpackages
 
@@ -42,7 +40,7 @@ determination framework. It is intended to parse natural
 language text into a structured intent that can then be invoked programatically.
 
 %prep
-%setup -q -n adapt-release-v%{version}
+%autosetup -p1 -n adapt-release-v%{version}
 sed -i -s "s/==/>=/" requirements.txt
 
 %build
@@ -53,11 +51,12 @@ sed -i -s "s/==/>=/" requirements.txt
 %python_expand %fdupes %{buildroot}%{$python_sitelib}
 
 %check
-%pyunittest test/*.py
+%pytest test/*.py
 
 %files %{python_files}
 %license LICENSE.md
 %doc README.md SUPPORT.md GETTING_STARTED.md
-%{python_sitelib}/*
+%{python_sitelib}/adapt
+%{python_sitelib}/adapt_parser-%{version}*-info
 
 %changelog
diff --git a/remove-python-six.patch b/remove-python-six.patch
new file mode 100644
index 0000000..2ea2c18
--- /dev/null
+++ b/remove-python-six.patch
@@ -0,0 +1,103 @@
+Index: adapt-release-v1.0.0/adapt/context.py
+===================================================================
+--- adapt-release-v1.0.0.orig/adapt/context.py
++++ adapt-release-v1.0.0/adapt/context.py
+@@ -16,8 +16,6 @@
+ """
+ Context Management code for Adapt (where context ~= persistent session state).
+ """ +-from six.moves import xrange +- + __author__ = "seanfitz, Art McGee" + + +@@ -129,7 +127,7 @@ class ContextManager(object): + + missing_entities = list(missing_entities) + context = [] +- for i in xrange(max_frames): ++ for i in range(max_frames): + frame_entities = [entity.copy() for entity in self.frame_stack[i].entities] + for entity in frame_entities: + entity['confidence'] = entity.get('confidence', 1.0) / (2.0 + i) +Index: adapt-release-v1.0.0/adapt/entity_tagger.py +=================================================================== +--- adapt-release-v1.0.0.orig/adapt/entity_tagger.py ++++ adapt-release-v1.0.0/adapt/entity_tagger.py +@@ -14,7 +14,6 @@ + # + + from adapt.tools.text.trie import Trie +-from six.moves import xrange + + __author__ = 'seanfitz' + +@@ -40,8 +39,8 @@ class EntityTagger(object): + Yields: + str: ? + """ +- for start_idx in xrange(len(tokens)): +- for end_idx in xrange(start_idx + 1, len(tokens) + 1): ++ for start_idx in range(len(tokens)): ++ for end_idx in range(start_idx + 1, len(tokens) + 1): + yield ' '.join(tokens[start_idx:end_idx]), start_idx + + def _sort_and_merge_tags(self, tags): +@@ -85,7 +84,7 @@ class EntityTagger(object): + additional_sort = len(entities) > 0 + + context_entities = [] +- for i in xrange(len(tokens)): ++ for i in range(len(tokens)): + part = ' '.join(tokens[i:]) + + for new_entity in self.trie.gather(part): +Index: adapt-release-v1.0.0/adapt/expander.py +=================================================================== +--- adapt-release-v1.0.0.orig/adapt/expander.py ++++ adapt-release-v1.0.0/adapt/expander.py +@@ -13,8 +13,6 @@ + # limitations under the License. + # + +-from six.moves import xrange +- + __author__ = 'seanfitz' + + +@@ -211,14 +209,14 @@ class BronKerboschExpander(object): + + """ + graph = SimpleGraph() +- for tag_index in xrange(len(tags)): +- for entity_index in xrange(len(tags[tag_index].get('entities'))): ++ for tag_index in range(len(tags)): ++ for entity_index in range(len(tags[tag_index].get('entities'))): + a_entity_name = graph_key_from_tag(tags[tag_index], entity_index) + tokens = self.tokenizer.tokenize(tags[tag_index].get('entities', [])[entity_index].get('match')) + for tag in tags[tag_index + 1:]: + start_token = tag.get('start_token') + if start_token >= tags[tag_index].get('start_token') + len(tokens): +- for b_entity_index in xrange(len(tag.get('entities'))): ++ for b_entity_index in range(len(tag.get('entities'))): + b_entity_name = graph_key_from_tag(tag, b_entity_index) + graph.add_edge(a_entity_name, b_entity_name) + +@@ -238,7 +236,7 @@ class BronKerboschExpander(object): + + # name entities + for tag in tags: +- for entity_index in xrange(len(tag.get('entities'))): ++ for entity_index in range(len(tag.get('entities'))): + node_name = graph_key_from_tag(tag, entity_index) + if not node_name in entities: + entities[node_name] = [] +@@ -283,7 +281,7 @@ class BronKerboschExpander(object): + def end_token_index(): + return max([t.get('end_token') for t in overlapping_spans]) + +- for i in xrange(len(tags)): ++ for i in range(len(tags)): + tag = tags[i] + + if len(overlapping_spans) > 0 and end_token_index() >= tag.get('start_token'):