Remove Python 3.9-specific Mycroft patches

This commit is contained in:
j1nx 2021-04-07 12:25:22 +02:00
parent 417c07ba75
commit 0209d97216
3 changed files with 0 additions and 178 deletions


@@ -1,41 +0,0 @@
From 002b16ba1f217a0b57e2d89ef51bda0fd94ab4f4 Mon Sep 17 00:00:00 2001
From: Åke Forslund <ake.forslund@gmail.com>
Date: Sat, 6 Mar 2021 11:11:14 +0100
Subject: [PATCH] Replace multiprocessing with concurrent.futures
This uses the ThreadPoolExecutor from concurrent.futures instead of
multiprocessing's ThreadPool, since the thread pool in multiprocessing
can't safely be used in a multithreaded context in Python 3.9+.
---
msm/mycroft_skills_manager.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/msm/mycroft_skills_manager.py b/msm/mycroft_skills_manager.py
index 82500b4..a1ae41f 100644
--- a/msm/mycroft_skills_manager.py
+++ b/msm/mycroft_skills_manager.py
@@ -23,12 +23,12 @@
MSM can be used on the command line but is also used by Mycroft core daemons.
"""
+from concurrent.futures import ThreadPoolExecutor
import time
import logging
import shutil
from functools import wraps
from glob import glob
-from multiprocessing.pool import ThreadPool
from os import path
from typing import Dict, List
@@ -487,8 +487,8 @@ def run_item(skill):
func.__name__, skill.name
))
- with ThreadPool(max_threads) as tp:
- return tp.map(run_item, skills)
+ with ThreadPoolExecutor(max_threads) as executor:
+ return executor.map(run_item, skills)
@save_device_skill_state
def install_defaults(self):
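For reference, the replacement above is essentially a drop-in change. Below is a minimal standalone sketch of the same pattern; run_item and skills mirror the names in the MSM code, while the wrapper name and the max_threads default are made up for illustration.

from concurrent.futures import ThreadPoolExecutor

def run_on_all_skills(run_item, skills, max_threads=20):
    # ThreadPoolExecutor.map() is the analogue of ThreadPool.map(): it applies
    # run_item to every skill and yields the results in input order.
    # Unlike multiprocessing's ThreadPool, it can safely be created and used
    # from a non-main thread on Python 3.9+.
    with ThreadPoolExecutor(max_threads) as executor:
        return list(executor.map(run_item, skills))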


@@ -1,38 +0,0 @@
From 9761899ea4d897b2c878d9a7ac300a611a0e4609 Mon Sep 17 00:00:00 2001
From: j1nx <p.steenbergen@j1nx.nl>
Date: Mon, 5 Apr 2021 17:05:47 +0200
Subject: [PATCH 1/1] Workaround of Python3.9+ multiprocessing issues
---
mycroft/util/process_utils.py | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/mycroft/util/process_utils.py b/mycroft/util/process_utils.py
index 81f5c90f59..d272b42729 100644
--- a/mycroft/util/process_utils.py
+++ b/mycroft/util/process_utils.py
@@ -10,6 +10,21 @@ from time import sleep
from .log import LOG
+def initialize_mp_context():
+ """Handle import behaviour changes in python 3.9+
+ Pre-import colliding multiprocessing parts.
+ This should be considered a workaround and not a solution of the issue.
+ """
+ LOG.info('Preloading multiprocessing internals...')
+ import multiprocessing.popen_spawn_posix # noqa
+ import multiprocessing.queues # noqa
+ import multiprocessing.resource_tracker # noqa
+ import multiprocessing.synchronize # noqa
+
+
+initialize_mp_context()
+
+
def reset_sigint_handler():
"""Reset the sigint handler to the default.
--
2.20.1
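For context, the workaround does nothing more than force the multiprocessing submodules that Python 3.9+ otherwise imports lazily to be loaded once at startup, before any other thread can trigger those imports. A standalone sketch of the same idea follows; the worker function is made up to illustrate a process being spawned from a secondary thread, which is the kind of call site the preload is meant to protect.

import multiprocessing
import threading

def initialize_mp_context():
    """Pre-import multiprocessing internals that Python 3.9+ loads lazily.

    Doing the imports once in the main thread avoids the collisions the
    patch describes; as the patch notes, this is a workaround, not a fix.
    The popen_spawn_posix module is POSIX-only, matching the patch.
    """
    import multiprocessing.popen_spawn_posix  # noqa
    import multiprocessing.queues  # noqa
    import multiprocessing.resource_tracker  # noqa
    import multiprocessing.synchronize  # noqa

def worker():
    # Spawning a Process from a non-main thread is what would otherwise
    # trigger the lazy imports preloaded above (illustrative call site only).
    proc = multiprocessing.Process(target=print, args=('hello from child',))
    proc.start()
    proc.join()

initialize_mp_context()  # run before any threads or child processes exist
threading.Thread(target=worker).start()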


@@ -1,99 +0,0 @@
From 4841ba2166107b14f2206ea4ae0cf7c68c62be1b Mon Sep 17 00:00:00 2001
From: Åke Forslund <ake.forslund@gmail.com>
Date: Sat, 6 Mar 2021 11:30:05 +0100
Subject: [PATCH 1/2] Replace multiprocessing with concurrent.futures
Multiprocessing pools can't safely be used in a threaded context in
Python 3.9+. This replaces the multiprocessing Pool with a
ProcessPoolExecutor from concurrent.futures.
Handle shutdown implicitly
---
padatious/training_manager.py | 15 +++++++++------
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/padatious/training_manager.py b/padatious/training_manager.py
index 3bef33c..61f2edd 100644
--- a/padatious/training_manager.py
+++ b/padatious/training_manager.py
@@ -11,9 +11,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import multiprocessing as mp
+from concurrent.futures import ProcessPoolExecutor
+from concurrent.futures import TimeoutError
from functools import partial
-from multiprocessing.context import TimeoutError
from os.path import join, isfile, isdir, splitext
import padatious
@@ -95,15 +95,18 @@ def train(self, debug=True, single_thread=False, timeout=20):
train(i)
else:
# Train in multiple processes to disk
- pool = mp.Pool()
+ pool = ProcessPoolExecutor()
try:
- pool.map_async(train, self.objects_to_train).get(timeout)
+ _ = list(pool.map(train,
+ self.objects_to_train,
+ timeout=timeout))
+
except TimeoutError:
if debug:
print('Some objects timed out while training')
finally:
- pool.close()
- pool.join()
+ pass
+ # No explicit shutdown, let it complete in the background.
# Load saved objects from disk
for obj in self.objects_to_train:
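The hunk above replaces mp.Pool.map_async(...).get(timeout) with ProcessPoolExecutor.map(..., timeout=...). Below is a minimal standalone sketch of that pattern; train and objects_to_train mirror the padatious names, while the wrapper name and the print call are illustrative.

from concurrent.futures import ProcessPoolExecutor, TimeoutError

def train_all(train, objects_to_train, timeout=20):
    # train must be picklable (a module-level function or a functools.partial
    # of one), since ProcessPoolExecutor ships it to worker processes.
    pool = ProcessPoolExecutor()
    try:
        # map() submits every item up front; the timeout applies while the
        # results are collected and raises TimeoutError if one is too slow.
        list(pool.map(train, objects_to_train, timeout=timeout))
    except TimeoutError:
        print('Some objects timed out while training')
    # No explicit shutdown: workers that are still running finish in the
    # background and the interpreter waits for them on exit.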
From f56b2944d1d470e28b089e814cdaab388619926e Mon Sep 17 00:00:00 2001
From: Åke Forslund <ake.forslund@gmail.com>
Date: Sat, 3 Apr 2021 11:15:37 +0200
Subject: [PATCH 2/2] Add proper subprocess timeout
---
padatious/intent_container.py | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
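The diff below forwards a timeout to subprocess.call() and converts TimeoutExpired into the trainer's own failure code. A standalone sketch of that pattern follows; the wrapper name and argument handling are made up, while the kill-on-timeout behaviour of call() and the return code 10 come from the library documentation and the diff respectively.

import sys
from subprocess import call, TimeoutExpired

def call_module_with_timeout(module_args, timeout=None):
    try:
        # call() kills the child and re-raises TimeoutExpired once the
        # timeout elapses, so no manual cleanup is needed here.
        ret = call([sys.executable, '-m', *module_args], timeout=timeout)
    except TimeoutExpired:
        ret = 10  # map a wall-clock timeout onto an internal failure code
    return ret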
diff --git a/padatious/intent_container.py b/padatious/intent_container.py
index b020bf7..8a5544d 100644
--- a/padatious/intent_container.py
+++ b/padatious/intent_container.py
@@ -18,7 +18,7 @@
import padaos
import sys
from functools import wraps
-from subprocess import call, check_output
+from subprocess import call, check_output, TimeoutExpired
from threading import Thread
from padatious.match_data import MatchData
@@ -261,12 +261,16 @@ def train_subprocess(self, *args, **kwargs):
Returns:
bool: True for success, False if timed out
"""
- ret = call([
- sys.executable, '-m', 'padatious', 'train', self.cache_dir,
- '-d', json.dumps(self.serialized_args),
- '-a', json.dumps(args),
- '-k', json.dumps(kwargs),
- ])
+ try:
+ ret = call([
+ sys.executable, '-m', 'padatious', 'train', self.cache_dir,
+ '-d', json.dumps(self.serialized_args),
+ '-a', json.dumps(args),
+ '-k', json.dumps(kwargs),
+ ], timeout=kwargs.get('timeout'))
+ except TimeoutExpired:
+ ret = 10 # Treat process timeout as internal timeout
+
if ret == 2:
raise TypeError(
'Invalid train arguments: {} {}'.format(