Change the batch stop task to run in parallel
@@ -734,8 +734,8 @@ def delete_last_message():
 @app.route("/stopAllTask", methods=['POST'])
 def stopAllTask():
     idList = request.get_json()
-    code, msg = ThreadManager.batch_stop(idList)
-    return ResultData(code, [], msg).toJson()
+    code, msg, data = ThreadManager.batch_stop(idList)
+    return ResultData(code, data, msg).toJson()

 # Switch account
 @app.route('/changeAccount', methods=['POST'])
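Note: with this change the route passes the third element of the tuple (the list of udids that could not be stopped) through as the response data instead of an empty list. A minimal client-side sketch of consuming the new shape follows; the host, port, and the JSON field names ("code", "data", "msg") are assumptions inferred from the ResultData(code, data, msg) call above, not confirmed by this commit.

# Hypothetical client check against the updated /stopAllTask response.
# Assumes ResultData serializes to {"code": ..., "data": [...], "msg": ...}
# and that the service listens on 127.0.0.1:5000; neither is confirmed here.
import requests

resp = requests.post(
    "http://127.0.0.1:5000/stopAllTask",
    json=["udid-1", "udid-2"],   # the idList read via request.get_json()
    timeout=30,
)
body = resp.json()
if body["code"] == 200:
    print("all tasks stopped")
else:
    # data now carries the udids that still could not be stopped
    print("failed to stop:", body["data"])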
@@ -4,6 +4,7 @@ import threading
 import ctypes
 import inspect
 import time
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from typing import Dict, Optional, List, Tuple, Any

 from Utils.LogManager import LogManager
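The newly imported ThreadPoolExecutor and as_completed are the standard-library pieces used in the method below: submit returns a Future per device, and as_completed yields each future as soon as it finishes, so one slow device does not delay handling the fast ones. A standalone sketch of that pattern, with a purely illustrative sleep-based worker:

from concurrent.futures import ThreadPoolExecutor, as_completed
import time

def _work(name: str) -> str:
    # stand-in for a per-device stop call
    time.sleep(0.1)
    return name

with ThreadPoolExecutor(max_workers=4) as pool:
    futures = [pool.submit(_work, n) for n in ("a", "b", "c")]
    for f in as_completed(futures):   # yields futures in completion order
        print("done:", f.result())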
@@ -172,36 +173,54 @@ class ThreadManager:
         return 1001, "failed"

     @classmethod
-    def batch_stop(cls, udids: List[str], join_timeout_each: float = 2.0,
-                   retries_each: int = 5, wait_step_each: float = 0.2) -> Tuple[int, str]:
+    def batch_stop(cls, udids: List[str]) -> Tuple[int, str, List[str]]:
         """
-        Run one full pass over all devices -> record the failures -> retry the failures for 3 rounds (1 second between rounds).
-        Return a single combined result once everything has finished:
-            all succeeded : (200, "停止任务成功")
-            still failing : (1001, "停止任务失败")
+        Parallel batch stop (simplified):
+          - accepts only the udids parameter
+          - the other knobs are hard-coded: join_timeout=2.0, retries=5, wait_step=0.2
+          - all devices are stopped concurrently; failures are retried for 3 rounds, 1 second apart
+          - returns:
+              (200, "停止任务成功", [])
+              (1001, "停止任务失败", [failed udids...])
         """
+        udids = udids or []
+        if not udids:
+            return 200, "停止任务成功", []
+
+        join_timeout = 2.0
+        retries = 5
+        wait_step = 0.2
+        retry_rounds = 3
+        round_interval = 1.0
+
+        def _stop_one(u: str) -> Tuple[str, bool]:
+            ok = cls._stop_once(u, join_timeout, retries, wait_step)
+            return u, ok
+
+        # === Round 1: stop every device in parallel ===
         fail: List[str] = []
-        # Round 1
-        for u in udids:
-            ok = cls._stop_once(u, join_timeout_each, retries_each, wait_step_each)
-            if not ok:
-                fail.append(u)
+        with ThreadPoolExecutor(max_workers=len(udids)) as pool:
+            futures = [pool.submit(_stop_one, u) for u in udids]
+            for f in as_completed(futures):
+                u, ok = f.result()
+                if not ok:
+                    fail.append(u)

-        # Retry only the failures, 3 rounds
-        for _ in range(3):
+        # === Retry the failed devices for 3 rounds (1 second between rounds) ===
+        for _ in range(retry_rounds):
             if not fail:
                 break
-            time.sleep(1.0)
+            time.sleep(round_interval)
             remain: List[str] = []
-            for u in fail:
-                ok = cls._stop_once(u, join_timeout_each, retries_each, wait_step_each)
-                if not ok:
-                    remain.append(u)
+            with ThreadPoolExecutor(max_workers=len(fail)) as pool:
+                futures = [pool.submit(_stop_one, u) for u in fail]
+                for f in as_completed(futures):
+                    u, ok = f.result()
+                    if not ok:
+                        remain.append(u)
             fail = remain

+        # === Final result ===
         if not fail:
-            return 200, "停止任务成功"
+            return 200, "停止任务成功", []
         else:
-            return 1001, "停止任务失败"
+            return 1001, "停止任务失败", fail
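To make the control flow easy to follow outside the project, here is a self-contained sketch of the same pattern: one parallel pass over every device, then up to three retry rounds that only revisit the failures. ThreadManager._stop_once is replaced by an illustrative stub; the stub's name, its random failure rate, and the return messages are not from the commit.

import random
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import List, Tuple

def _stop_once_stub(udid: str) -> bool:
    # Illustrative stand-in for ThreadManager._stop_once: fails ~30% of the time.
    return random.random() > 0.3

def batch_stop_sketch(udids: List[str]) -> Tuple[int, str, List[str]]:
    if not udids:
        return 200, "ok", []

    def stop_one(u: str) -> Tuple[str, bool]:
        return u, _stop_once_stub(u)

    def run_round(targets: List[str]) -> List[str]:
        # Stop every target concurrently and collect the ones that failed.
        failed: List[str] = []
        with ThreadPoolExecutor(max_workers=len(targets)) as pool:
            for f in as_completed([pool.submit(stop_one, u) for u in targets]):
                u, ok = f.result()
                if not ok:
                    failed.append(u)
        return failed

    fail = run_round(udids)        # first pass over every device
    for _ in range(3):             # up to 3 retry rounds, failures only
        if not fail:
            break
        time.sleep(1.0)
        fail = run_round(fail)

    return (200, "ok", []) if not fail else (1001, "failed", fail)

print(batch_stop_sketch(["udid-1", "udid-2", "udid-3"]))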