Initial YakPanel commit
This commit is contained in:
0
mod/project/node/__init__.py
Normal file
0
mod/project/node/__init__.py
Normal file
17
mod/project/node/dbutil/__init__.py
Normal file
17
mod/project/node/dbutil/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# Re-export the node database models and DB facades for the dbutil package.
from .load_db import LoadSite, HttpNode, TcpNode, NodeDB
from .node_db import Node, ServerNodeDB, ServerMonitorRepo, NodeAPPKey
from .file_transfer_db import FileTransfer, FileTransferDB, FileTransferTask
# from .executor import Script, ScriptGroup, ExecutorDB, ExecutorLog, ExecutorTask
from .node_task_flow import Script, Flow, CommandTask, CommandLog, TransferFile, TransferLog, TaskFlowsDB, \
    TransferTask, FlowTemplates

# Initialize the databases (create the SQLite files/schemas on first import).
try:
    NodeDB().init_db()
    ServerNodeDB().init_db()
    FileTransferDB().init_db()
    # ExecutorDB().init_db()
    TaskFlowsDB().init_db()
except Exception as e:
    # Best effort: report via the panel's error printer instead of
    # breaking the package import.
    import public
    public.print_error()
|
||||
481
mod/project/node/dbutil/executor.py
Normal file
481
mod/project/node/dbutil/executor.py
Normal file
@@ -0,0 +1,481 @@
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Optional, List, Dict, Tuple, Any, Union, Type, Generic, TypeVar, TextIO
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class Script:
    """Row model for the ``scripts`` table."""
    name: str
    script_type: str
    content: str
    id: Optional[int] = None
    description: Optional[str] = None
    group_id: int = 0
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    @staticmethod
    def check(data: Dict[str, Any]) -> str:
        """Validate raw request data; return an error message or "" when valid."""
        if not data.get("script_type"):
            return "Script type cannot be empty"
        if data["script_type"] not in ("python", "shell"):
            return "Script type error, please choose Python or Shell"
        if not data.get("content"):
            return "Script content cannot be empty"
        if not data.get("name"):
            return "Script name cannot be empty"
        return ""

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Script':
        """Build a Script from a raw DB row / request mapping."""
        def parse_dt(key: str) -> Optional[datetime]:
            raw = data.get(key)
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            id=int(data['id']) if data.get('id') else None,
            name=str(data['name']),
            script_type=str(data['script_type']),
            content=str(data['content']),
            description=str(data['description']) if data.get('description') else None,
            group_id=int(data['group_id']) if data.get('group_id') else 0,
            created_at=parse_dt('created_at'),
            updated_at=parse_dt('updated_at'),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; datetimes become ISO-8601 strings."""
        payload = {
            'id': self.id,
            'name': self.name,
            'script_type': self.script_type,
            'content': self.content,
            'description': self.description,
            'group_id': self.group_id,
        }
        payload['created_at'] = self.created_at.isoformat() if self.created_at else None
        payload['updated_at'] = self.updated_at.isoformat() if self.updated_at else None
        return payload
|
||||
|
||||
|
||||
@dataclass
class ScriptGroup:
    """Row model for the ``script_groups`` table."""
    name: str
    id: Optional[int] = None
    description: Optional[str] = None
    created_at: Optional[datetime] = None

    @staticmethod
    def check(data: Dict[str, Any]) -> str:
        """Return an error message for invalid input, or "" when valid."""
        if not data.get("name"):
            return "Script group name cannot be empty"
        return ""

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ScriptGroup':
        """Build a ScriptGroup from a raw DB row mapping."""
        raw_created = data.get('created_at')
        return cls(
            id=int(data['id']) if data.get('id') else None,
            name=str(data['name']),
            description=str(data['description']) if data.get('description') else None,
            created_at=datetime.fromisoformat(raw_created) if raw_created else None,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; datetimes become ISO-8601 strings."""
        created = self.created_at.isoformat() if self.created_at else None
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'created_at': created,
        }
|
||||
|
||||
|
||||
@dataclass
class ExecutorTask:
    """Row model for the ``executor_tasks`` table."""
    script_id: int
    script_content: str
    script_type: str
    server_ids: str = ""  # "|"-delimited server id markers (queried with LIKE "%|<id>%")
    id: Optional[int] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
    _log_cache_unused: None = None  # placeholder removed; see _elogs below
    _elogs: Optional[List["ExecutorLog"]] = None  # lazily attached run logs

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutorTask':
        """Build an ExecutorTask from a raw DB row mapping."""
        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            script_id=int(data['script_id']),
            script_content=str(data['script_content']),
            script_type=str(data['script_type']),
            # Bug fix: server_ids was previously dropped here, so a
            # to_dict()/from_dict() round trip lost the target servers.
            server_ids=str(data['server_ids']) if data.get('server_ids', None) else "",
            created_at=datetime.fromisoformat(data['created_at']) if data.get('created_at', None) else None,
            updated_at=datetime.fromisoformat(data['updated_at']) if data.get('updated_at', None) else None
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; datetimes become ISO-8601 strings."""
        return {
            'id': self.id,
            'script_id': self.script_id,
            'server_ids': self.server_ids,
            'script_content': self.script_content,
            'script_type': self.script_type,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }

    @property
    def elogs(self) -> List["ExecutorLog"]:
        """Logs attached to this task; empty list until the setter is used."""
        if self._elogs is None:
            return []
        return self._elogs

    @elogs.setter
    def elogs(self, elogs: List["ExecutorLog"]):
        self._elogs = elogs
|
||||
|
||||
|
||||
# Directory holding the per-execution log files written by ExecutorLog.
_EXECUTOR_LOG_DIR = public.get_panel_path() + "/logs/executor_log/"
try:
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists() guard; only OS-level failures are swallowed
    # (the old bare ``except:`` also hid KeyboardInterrupt/SystemExit).
    os.makedirs(_EXECUTOR_LOG_DIR, exist_ok=True)
except OSError:
    # Best effort: a failure here surfaces later when a log file is opened.
    pass
|
||||
|
||||
|
||||
@dataclass
class ExecutorLog:
    """Row model for the ``executor_logs`` table (one server's run of a task)."""
    executor_task_id: int
    server_id: int
    ssh_host: str
    id: Optional[int] = None
    status: int = 0  # 0: running  1: success  2: failed  3: error
    log_name: Optional[str] = None  # file name under _EXECUTOR_LOG_DIR
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
    _log_fp: Optional[TextIO] = None  # cached open handle, see log_fp

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutorLog':
        """Build an ExecutorLog from a raw DB row mapping."""
        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            executor_task_id=int(data['executor_task_id']),
            server_id=int(data['server_id']),
            ssh_host=str(data['ssh_host']),
            status=int(data['status']) if data.get('status', 0) else 0,
            log_name=str(data['log_name']) if data.get('log_name', None) else None,
            created_at=datetime.fromisoformat(data['created_at']) if data.get('created_at', None) else None,
            updated_at=datetime.fromisoformat(data['updated_at']) if data.get('updated_at', None) else None
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict; datetimes become ISO-8601 strings."""
        return {
            'id': self.id,
            'executor_task_id': self.executor_task_id,
            'server_id': self.server_id,
            'ssh_host': self.ssh_host,
            'status': self.status,
            'log_name': self.log_name,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }

    @property
    def log_file(self):
        # NOTE(review): raises TypeError when log_name is None — callers
        # appear to set log_name before using the file helpers; confirm.
        return os.path.join(_EXECUTOR_LOG_DIR, self.log_name)

    @property
    def log_fp(self):
        # Lazily open the log file. NOTE(review): mode "w+" truncates any
        # existing content each time the handle is (re)opened.
        if self._log_fp is None:
            self._log_fp = open(self.log_file, "w+")
        return self._log_fp

    def create_log(self):
        # Create (or empty) the on-disk log file.
        public.writeFile(self.log_file, "")

    def remove_log(self):
        # Delete the on-disk log file if present.
        if os.path.exists(self.log_file):
            os.remove(self.log_file)

    def get_log(self):
        # Return the full log content.
        return public.readFile(self.log_file)

    def write_log(self, log_data: str, is_end_log=False):
        """Write a chunk to the log; when is_end_log, close and drop the handle."""
        self.log_fp.write(log_data)
        self.log_fp.flush()
        if is_end_log:
            self.log_fp.close()
            self._log_fp = None
|
||||
|
||||
|
||||
# Union of all row dataclasses a _Table subclass may manage.
_TableType = TypeVar("_TableType", bound=Union[Script, ScriptGroup, ExecutorTask, ExecutorLog])
|
||||
|
||||
|
||||
class _Table(Generic[_TableType]):
    """Generic CRUD accessor for one table of the executor database.

    Subclasses set ``table_name`` and ``data_cls`` (the row dataclass).
    Following the module's convention, failures are reported as error-message
    strings or empty results rather than raised exceptions.
    """
    table_name: str = ""
    data_cls: Type[_TableType]

    def __init__(self, db_obj: db.Sql):
        self._db = db_obj

    # For a single item, returns the inserted row id (or an error string);
    # for a list, returns the number of inserted rows (or an error string).
    def create(self,
               data: Union[_TableType, List[_TableType]]) -> Union[int, str]:
        """Insert one row or a batch of rows."""
        if not isinstance(data, list):
            data = [data]

        if not len(data):
            raise ValueError("Data cannot be empty")
        if not isinstance(data[0], self.data_cls):
            raise ValueError("Data type error")

        now = datetime.now().isoformat()

        def filter_data(item):  # typo fix: was "fileter_data"
            item_dict = item.to_dict()
            # Let the DB assign ids; default unset timestamps to "now".
            if "id" in item_dict:
                item_dict.pop("id")
            if "created_at" in item_dict and item_dict["created_at"] is None:
                item_dict["created_at"] = now
            if "updated_at" in item_dict and item_dict["updated_at"] is None:
                item_dict["updated_at"] = now
            return item_dict

        data_list = list(map(filter_data, data))
        if len(data_list) == 1:
            try:
                res = self._db.table(self.table_name).insert(data_list[0])
                if isinstance(res, int):
                    return res
                return str(res)
            except Exception as e:
                return str(e)
        try:
            res = self._db.table(self.table_name).batch_insert(data_list)
            if isinstance(res, (int, bool)):
                return len(data)
            return str(res)
        except Exception as e:
            return str(e)

    def update(self, data: _TableType) -> str:
        """Update a row by its id. Returns "" on success or an error message."""
        if not isinstance(data, self.data_cls):
            raise ValueError("Data type error")
        data_dict = data.to_dict()
        # created_at is immutable; updated_at is refreshed on every update.
        data_dict.pop('created_at', None)
        if "updated_at" in data_dict:
            data_dict["updated_at"] = datetime.now().isoformat()
        if "id" not in data_dict:
            raise ValueError("The data ID cannot be empty")
        try:
            self._db.table(self.table_name).where("id=?", (data_dict["id"],)).update(data_dict)
        except Exception as e:
            return str(e)
        return ""

    def get_byid(self, data_id: int) -> Optional[_TableType]:
        """Fetch a single row by id; None when missing or on DB error."""
        try:
            result = self._db.table(self.table_name).where("id=?", (data_id,)).find()
        except Exception:
            return None
        if not result:
            return None
        return self.data_cls.from_dict(result)

    def delete(self, data_id: Union[int, List[int]]):
        """Delete one id or a list of ids. Returns "" on success or an error."""
        if isinstance(data_id, list):
            data_id = [int(item) for item in data_id]
        elif isinstance(data_id, int):
            data_id = [int(data_id)]
        else:
            # Consistency fix: this message was the only Chinese one in the
            # module; translated to match the other English error messages.
            return "Data id type error"
        try:
            self._db.table(self.table_name).where(
                "id in ({})".format(",".join(["?"] * len(data_id))), (*data_id,)
            ).delete()
            return ""
        except Exception as e:
            return str(e)

    def query(self, *args) -> List[_TableType]:
        """Return all rows matching the where-clause args (possibly empty)."""
        try:
            result = self._db.table(self.table_name).where(*args).select()
        except Exception:
            return []
        if not result:
            return []
        return [self.data_cls.from_dict(item) for item in result]

    def query_page(self, *args, page_num: int = 1, limit: int = 10) -> List[_TableType]:
        """Return one page of matching rows, newest (highest id) first."""
        try:
            offset = limit * (page_num - 1)
            result = self._db.table(self.table_name).where(*args).limit(limit, offset).order("id DESC").select()
        except Exception:
            public.print_error()
            return []
        if not result:
            return []
        return [self.data_cls.from_dict(item) for item in result]

    def count(self, *args) -> int:
        """Return the number of matching rows (0 on error)."""
        try:
            result = self._db.table(self.table_name).where(*args).count()
        except Exception:
            return 0
        return result

    def find(self, *args) -> Optional[_TableType]:
        """Return the first matching row, or None."""
        try:
            result = self._db.table(self.table_name).where(*args).find()
        except Exception:
            return None
        if not result:
            return None
        return self.data_cls.from_dict(result)
|
||||
|
||||
|
||||
class _ScriptTable(_Table[Script]):
    """Table accessor for ``scripts``."""
    table_name = "scripts"
    data_cls = Script

    def set_group_id(self, group_id: int, *where_args) -> str:
        """Assign ``group_id`` to all scripts matched by ``where_args``.

        ``where_args`` follows the ``(where_sql, params)`` shape used by every
        other query in this module. Returns "" on success, else the error text.
        """
        try:
            # Bug fix: the args tuple was previously passed as a single
            # positional argument (``where(where_args)``) instead of being
            # unpacked into the (sql, params) pair ``where`` expects.
            self._db.table(self.table_name).where(*where_args).update({"group_id": group_id})
        except Exception as e:
            return str(e)
        return ""
|
||||
|
||||
|
||||
class _ScriptGroupTable(_Table[ScriptGroup]):
    """Table accessor for ``script_groups``."""
    table_name = "script_groups"
    data_cls = ScriptGroup
    # Virtual group (id 0) that scripts fall into when no group is set;
    # it is never stored in the table, only prepended to query results.
    default_group = ScriptGroup(
        id=0,
        name="default",
        description="Default grouping, use this grouping when not set",
        created_at=datetime.now(),
    )

    def all_group(self) -> List[ScriptGroup]:
        """Return every group, always headed by the virtual default group."""
        try:
            result = self._db.table(self.table_name).select()
        except Exception:
            return [self.default_group]
        if not result:
            # Bug fix: an empty table previously returned [] and hid the
            # default group, although scripts with group_id 0 still exist.
            return [self.default_group]
        return [self.default_group] + [self.data_cls.from_dict(item) for item in result]
|
||||
|
||||
|
||||
class _ExecutorTaskTable(_Table[ExecutorTask]):
    """Table accessor for ``executor_tasks``."""
    table_name = "executor_tasks"
    data_cls = ExecutorTask

    def query_tasks(self,
                    page=1, size=10, node_id: int = None, script_type: str = None, search: str = None
                    ) -> Tuple[int, List[ExecutorTask]]:
        """Return ``(total_count, page_of_tasks)`` filtered by type, search text and node.

        ``search`` matches the stored script content as well as the name and
        description of the originating script row.
        """
        where_args, parms = [], []
        if script_type and script_type != "all":
            where_args.append("script_type=?")
            parms.append(script_type)
        if search:
            search_str = "script_content like ?"
            parms.append("%{}%".format(search))

            # Also match tasks whose originating script name/description matches.
            stable = _ScriptTable(self._db)
            data = stable.query("name like ? or description like ?", ("%{}%".format(search), "%{}%".format(search)))
            if data:
                search_str += " or script_id in ({})".format(",".join(["?"] * len(data)))
                where_args.append("(" + search_str + ")")
                # Bug fix: the ids must be appended individually so the
                # parameter count matches the "?" placeholders — a single
                # nested tuple was appended before, breaking the query.
                parms.extend(item.id for item in data)
            else:
                where_args.append(search_str)

        if node_id:
            where_args.append("server_ids like ?")
            parms.append("%|{}%".format(node_id))

        count = self.count(
            " AND ".join(where_args),
            (*parms,)
        )

        return count, self.query_page(
            " AND ".join(where_args),
            (*parms,),
            page_num=page,
            limit=size
        )
|
||||
|
||||
|
||||
class _ExecutorLogTable(_Table[ExecutorLog]):
    """Table accessor for ``executor_logs``; inherits all CRUD from _Table unchanged."""
    table_name = "executor_logs"
    data_cls = ExecutorLog
|
||||
|
||||
|
||||
class ExecutorDB:
    """Facade opening executor.db and exposing one accessor per table.

    Usable as a context manager; ``close()`` is called on exit.
    """
    _DB_FILE = public.get_panel_path() + "/data/db/executor.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/executor.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql hard-codes its file path; override the name-mangled private
        # attribute so this instance points at the executor database.
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql
        self.Script = _ScriptTable(self.db)
        self.ScriptGroup = _ScriptGroupTable(self.db)
        self.ExecutorTask = _ExecutorTaskTable(self.db)
        self.ExecutorLog = _ExecutorLogTable(self.db)

    def init_db(self):
        """Create the schema when the DB file is missing or empty."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
            # sqlite3 is imported at module level (the redundant local import
            # was removed); try/finally prevents leaking the connection when
            # executescript raises.
            conn = sqlite3.connect(self._DB_FILE)
            try:
                conn.cursor().executescript(sql_data)
                conn.commit()
            finally:
                conn.close()

    def close(self):
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()
|
||||
60
mod/project/node/dbutil/executor.sql
Normal file
60
mod/project/node/dbutil/executor.sql
Normal file
@@ -0,0 +1,60 @@
|
||||
|
||||
-- Scripts table: user-defined shell/python scripts.
CREATE TABLE IF NOT EXISTS scripts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK(length(name) <= 255),
    script_type TEXT NOT NULL CHECK(length(script_type) <= 255),
    content TEXT NOT NULL,
    description TEXT CHECK(length(description) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    group_id INTEGER NOT NULL DEFAULT 0
);

-- Script groups table.
CREATE TABLE IF NOT EXISTS script_groups (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK(length(name) <= 255),
    description TEXT CHECK(length(description) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Executor tasks table.
CREATE TABLE IF NOT EXISTS executor_tasks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    server_ids TEXT NOT NULL,
    script_id INTEGER NOT NULL,
    script_content TEXT NOT NULL,
    script_type TEXT NOT NULL CHECK(length(script_type) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Executor logs table (status: 0=running 1=success 2=failed 3=error).
CREATE TABLE IF NOT EXISTS executor_logs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    executor_task_id INTEGER NOT NULL,
    server_id INTEGER NOT NULL,
    ssh_host TEXT NOT NULL,
    status INTEGER NOT NULL DEFAULT 0 CHECK(status IN (0,1,2,3)),
    log_name TEXT CHECK(length(log_name) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);


-- Indexes (created separately to avoid SQLite syntax errors).
-- scripts indexes
CREATE INDEX IF NOT EXISTS idx_scripts_name ON scripts(name);
CREATE INDEX IF NOT EXISTS idx_scripts_script_type ON scripts(script_type);
CREATE INDEX IF NOT EXISTS idx_scripts_group_id ON scripts(group_id);

-- script_groups indexes
CREATE INDEX IF NOT EXISTS idx_script_groups_name ON script_groups(name);

-- executor_tasks indexes
CREATE INDEX IF NOT EXISTS idx_executor_tasks_script_id ON executor_tasks(script_id);

-- executor_logs indexes
CREATE INDEX IF NOT EXISTS idx_executor_logs_task_server ON executor_logs(executor_task_id, server_id);
CREATE INDEX IF NOT EXISTS idx_executor_logs_status ON executor_logs(status);
|
||||
36
mod/project/node/dbutil/file_transfer.sql
Normal file
36
mod/project/node/dbutil/file_transfer.sql
Normal file
@@ -0,0 +1,36 @@
|
||||
-- Transfer tasks table.
CREATE TABLE IF NOT EXISTS transfer_tasks
(
    task_id INTEGER PRIMARY KEY AUTOINCREMENT,
    source_node TEXT NOT NULL DEFAULT '{}', -- {"address":"https:/xxxx", "api_key":"xxxxx", "name":"xxxx"}
    target_node TEXT NOT NULL DEFAULT '{}', -- {"address":"https:/xxxx", "api_key":"xxxxx", "name":"xxxx"}
    source_path_list TEXT NOT NULL DEFAULT '[]', -- paths on the source node: [{"path":"/www/wwwroot/aaaa", "is_dir":true}]
    target_path TEXT NOT NULL, -- destination path on the target node
    task_action TEXT NOT NULL, -- upload/download
    status TEXT NOT NULL, -- pending/running/completed/failed
    default_mode TEXT NOT NULL, -- conflict handling: cover=overwrite, ignore=skip, rename=rename
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    started_at TIMESTAMP,
    completed_at TIMESTAMP,
    created_by TEXT NOT NULL, -- name of the node that created the task
    target_task_id INTEGER NOT NULL,
    is_source_node BOOLEAN NOT NULL, -- whether this node is the sender
    is_target_node BOOLEAN NOT NULL -- whether this node is the receiver
);

-- File transfer detail table (one row per file in a task).
CREATE TABLE IF NOT EXISTS file_transfers
(
    transfer_id INTEGER PRIMARY KEY AUTOINCREMENT,
    task_id INTEGER NOT NULL,
    src_file TEXT NOT NULL, -- source file path
    dst_file TEXT NOT NULL, -- destination file path
    file_size INTEGER NOT NULL, -- file size
    is_dir INTEGER NOT NULL DEFAULT 0,
    status TEXT NOT NULL, -- pending/running/completed/failed
    progress INTEGER DEFAULT 0, -- 0-100
    message TEXT NOT NULL DEFAULT '',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    started_at TIMESTAMP,
    completed_at TIMESTAMP
);
|
||||
328
mod/project/node/dbutil/file_transfer_db.py
Normal file
328
mod/project/node/dbutil/file_transfer_db.py
Normal file
@@ -0,0 +1,328 @@
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Optional, List, Dict, Tuple
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class FileTransferTask:
    """Row model for the ``transfer_tasks`` table.

    ``source_node``/``target_node`` are dicts like
    ``{"address": ..., "api_key": ..., "name": ...}``; ``source_path_list``
    is a list like ``[{"path": ..., "is_dir": bool}]``. All three are stored
    JSON-encoded in SQLite and decoded here.
    """
    task_id: Optional[int] = None
    source_node: dict = field(default_factory=dict)
    target_node: dict = field(default_factory=dict)
    source_path_list: list = field(default_factory=list)
    target_path: str = ""  # destination path on the target node
    task_action: str = ""  # upload/download
    status: str = "pending"  # pending/running/completed/failed
    default_mode: str = "cover"  # conflict handling: cover=overwrite, ignore=skip, rename
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    created_by: str = ""  # name of the node that created the task
    target_task_id: int = 0
    is_source_node: bool = False  # whether this node is the sender
    is_target_node: bool = False  # whether this node is the receiver

    @staticmethod
    def _decode_json(value, expected_type, fallback):
        """Decode one JSON column: str -> json.loads, already-typed -> as-is,
        anything else -> fallback.

        Replaces three copies of an if/elif chain that contained no-op
        ``x = x`` branches.
        """
        if isinstance(value, str):
            return json.loads(value)
        if isinstance(value, expected_type):
            return value
        return fallback

    @classmethod
    def from_dict(cls, row: dict) -> 'FileTransferTask':
        """Build a FileTransferTask from a raw DB row mapping."""
        def parse_ts(key: str) -> Optional[datetime]:
            raw = row.get(key, "")
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            task_id=row.get("task_id", None),
            source_node=cls._decode_json(row.get("source_node", "{}"), dict, {}),
            target_node=cls._decode_json(row.get("target_node", "{}"), dict, {}),
            source_path_list=cls._decode_json(row.get("source_path_list", "[]"), list, []),
            target_path=row.get("target_path", ""),
            task_action=row.get("task_action", ""),
            status=row.get("status", ""),
            default_mode=row.get("default_mode", "cover"),
            created_at=parse_ts("created_at"),
            started_at=parse_ts("started_at"),
            completed_at=parse_ts("completed_at"),
            created_by=row.get("created_by", ""),
            target_task_id=row.get("target_task_id", 0),
            is_source_node=row.get("is_source_node", False),
            is_target_node=row.get("is_target_node", False)
        )

    def to_dict(self) -> dict:
        """Serialize to a plain dict; datetimes become ISO-8601 strings.

        The JSON columns stay as Python dict/list here; the DB layer encodes
        them just before insert/update.
        """
        return {
            "task_id": self.task_id,
            "source_node": self.source_node,
            "target_node": self.target_node,
            "source_path_list": self.source_path_list,
            "target_path": self.target_path,
            "task_action": self.task_action,
            "status": self.status,
            "default_mode": self.default_mode,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "started_at": self.started_at.isoformat() if self.started_at else None,
            "completed_at": self.completed_at.isoformat() if self.completed_at else None,
            "created_by": self.created_by,
            "target_task_id": self.target_task_id,
            "is_source_node": self.is_source_node,
            "is_target_node": self.is_target_node
        }
|
||||
|
||||
|
||||
@dataclass
class FileTransfer:
    """Row model for the ``file_transfers`` table (one file within a task)."""
    transfer_id: Optional[int] = None
    task_id: int = 0
    src_file: str = ""
    dst_file: str = ""
    file_size: int = 0
    is_dir: int = 0
    status: str = ""  # pending/running/completed/failed
    progress: int = 0  # 0-100
    message: str = ""
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    @classmethod
    def from_dict(cls, row: dict) -> 'FileTransfer':
        """Build a FileTransfer from a raw DB row mapping."""
        def parse_ts(key: str) -> Optional[datetime]:
            raw = row.get(key, "")
            return datetime.fromisoformat(raw) if raw else None

        record = cls(
            transfer_id=row.get("transfer_id", None),
            task_id=row.get("task_id", 0),
            src_file=row.get("src_file", ""),
            dst_file=row.get("dst_file", ""),
            file_size=row.get("file_size", 0),
            is_dir=row.get("is_dir", 0),
            status=row.get("status", ""),
            progress=row.get("progress", 0),
            message=row.get("message", ""),
        )
        record.created_at = parse_ts("created_at")
        record.started_at = parse_ts("started_at")
        record.completed_at = parse_ts("completed_at")
        return record

    def to_dict(self) -> dict:
        """Serialize to a plain dict; datetimes become ISO-8601 strings."""
        def fmt(ts: Optional[datetime]):
            return ts.isoformat() if ts else None

        return {
            "transfer_id": self.transfer_id,
            "task_id": self.task_id,
            "src_file": self.src_file,
            "dst_file": self.dst_file,
            "file_size": self.file_size,
            "is_dir": self.is_dir,
            "status": self.status,
            "progress": self.progress,
            "message": self.message,
            "created_at": fmt(self.created_at),
            "started_at": fmt(self.started_at),
            "completed_at": fmt(self.completed_at),
        }
|
||||
|
||||
|
||||
# SQLite access layer for file-transfer tasks.
class FileTransferDB:
    """DB facade for the ``transfer_tasks`` and ``file_transfers`` tables.

    Wraps the panel's ``db.Sql`` helper pointed at a dedicated SQLite file.
    Mutating methods return "" on success or an error-message string.
    """
    _DB_FILE = public.get_panel_path() + "/data/db/node_file_transfer.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/file_transfer.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql hard-codes its file path; override the name-mangled private
        # attribute so this instance uses the transfer database.
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql

    def init_db(self):
        """Create the schema when the DB file is missing or empty."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
            # sqlite3 is imported at module level (redundant local import
            # removed); try/finally prevents leaking the connection when
            # executescript raises.
            conn = sqlite3.connect(self._DB_FILE)
            try:
                conn.cursor().executescript(sql_data)
                conn.commit()
            finally:
                conn.close()

    def close(self):
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()

    def __del__(self):
        # Robustness fix: __del__ may run on a half-constructed instance
        # (e.g. when __init__ raised) and must never propagate exceptions.
        try:
            if hasattr(self, "db"):
                self.close()
        except Exception:
            pass

    @staticmethod
    def _serialize_task(task_data: dict) -> dict:
        """JSON-encode the structured columns of a task row, in place."""
        task_data["source_node"] = json.dumps(task_data["source_node"])
        task_data["target_node"] = json.dumps(task_data["target_node"])
        task_data["source_path_list"] = json.dumps(task_data["source_path_list"])
        return task_data

    def create_task(self, task: FileTransferTask) -> str:
        """Insert a new task; on success ``task.task_id`` is set from the DB."""
        task_data = task.to_dict()
        task_data.pop('task_id', None)
        task_data.pop('created_at', None)  # let SQLite fill CURRENT_TIMESTAMP
        self._serialize_task(task_data)
        try:
            err = self.db.table("transfer_tasks").insert(task_data)
            if isinstance(err, str):
                return err
            elif isinstance(err, int):
                task.task_id = err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def update_task(self, task: FileTransferTask) -> str:
        """Update an existing task identified by ``task.task_id``."""
        task_data = task.to_dict()
        task_data.pop('created_at', None)  # never overwrite the creation time
        self._serialize_task(task_data)
        if not task.task_id:
            return "task_id is required"
        try:
            err = self.db.table("transfer_tasks").where("task_id = ?", task.task_id).update(task_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def get_task(self, task_id: int) -> Tuple[Optional[dict], str]:
        """Return ``(row, error)`` for one task; row is None when not found."""
        result = self.db.table("transfer_tasks").where("task_id = ?", task_id).find()
        if isinstance(result, str):
            return None, result
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        return result, ""

    def get_last_task(self) -> Tuple[Optional[dict], str]:
        """Return ``(row, error)`` for the most recently created task."""
        result = self.db.table("transfer_tasks").order("task_id DESC").limit(1).find()
        if isinstance(result, str):
            return None, result
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        return result, ""

    def delete_task(self, task_id: int) -> str:
        """Delete a task row. Returns "" on success or an error message."""
        result = self.db.table("transfer_tasks").where("task_id = ?", task_id).delete()
        if isinstance(result, str):
            return result
        return ""

    def get_all_tasks(self, offset: int = 0, limit: int = 100) -> List[dict]:
        """Return up to ``limit`` task rows starting at ``offset``."""
        results = self.db.table("transfer_tasks").limit(limit, offset).select()
        if isinstance(results, list):
            return results
        return []

    def count_tasks(self) -> int:
        """Return the total number of task rows."""
        return self.db.table("transfer_tasks").count()

    def create_file_transfer(self, transfer: FileTransfer) -> str:
        """Insert one file-transfer detail row."""
        transfer_data = transfer.to_dict()
        transfer_data.pop('transfer_id', None)
        transfer_data.pop('created_at', None)  # let SQLite fill CURRENT_TIMESTAMP
        try:
            err = self.db.table("file_transfers").insert(transfer_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def update_file_transfer(self, transfer: FileTransfer) -> str:
        """Update one file-transfer detail row by ``transfer_id``."""
        transfer_data = transfer.to_dict()
        # Consistency fix: like update_task, never overwrite created_at —
        # it was previously rewritten with the serialized value on every update.
        transfer_data.pop('created_at', None)
        if not transfer.transfer_id:
            return "transfer_id is required"
        try:
            err = self.db.table("file_transfers").where("transfer_id = ?", transfer.transfer_id).update(transfer_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def get_file_transfer(self, transfer_id: int) -> Optional[dict]:
        """Return one file-transfer row, or None when missing or on error."""
        result = self.db.table("file_transfers").where("transfer_id = ?", transfer_id).find()
        if isinstance(result, str):
            return None
        if self.db.ERR_INFO:
            return None
        return result

    def get_task_file_transfers(self, task_id: int) -> List[dict]:
        """Return every file-transfer row belonging to one task."""
        results = self.db.table("file_transfers").where("task_id = ?", task_id).select()
        if isinstance(results, list):
            return results
        return []

    def batch_create_file_transfers(self, transfers: List[FileTransfer]) -> str:
        """Batch-insert file-transfer records.

        Args:
            transfers: list of FileTransfer objects.

        Returns:
            "" on success, otherwise an error-message string.
        """
        if not transfers:
            return ""

        try:
            # Prepare the rows for batch insertion.
            transfer_data_list = []
            for transfer in transfers:
                transfer_data = transfer.to_dict()
                transfer_data.pop('transfer_id', None)
                transfer_data['created_at'] = datetime.now().isoformat()
                transfer_data_list.append(transfer_data)

            err = self.db.table("file_transfers").batch_insert(transfer_data_list)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Batch creation of file transfer records failed: {str(e)}"

    def last_task_all_status(self) -> Tuple[Dict, str]:
        """Return the most recent task together with all its file rows."""
        last_task, err = self.get_last_task()
        if err:
            return {}, err
        if not last_task:
            return {}, ""

        task = FileTransferTask.from_dict(last_task)
        file_list = self.get_task_file_transfers(task.task_id)
        return {
            "task": task.to_dict(),
            "file_list": file_list,
        }, ""
|
||||
54
mod/project/node/dbutil/load_balancer.sql
Normal file
54
mod/project/node/dbutil/load_balancer.sql
Normal file
@@ -0,0 +1,54 @@
|
||||
-- load_sites: load-balanced sites
CREATE TABLE IF NOT EXISTS `load_sites`
(
    `load_id` INTEGER PRIMARY KEY AUTOINCREMENT, -- load balancer id
    `name` TEXT NOT NULL UNIQUE, -- load balancer name
    `site_id` INTEGER NOT NULL DEFAULT 0, -- site id
    `site_name` TEXT NOT NULL , -- site name, the website's primary domain
    `site_type` TEXT NOT NULL DEFAULT 'http', -- http, tcp (http: HTTP load balancing, tcp: TCP/UDP load balancing)
    `ps` TEXT NOT NULL DEFAULT '',
    `http_config` TEXT NOT NULL DEFAULT '{"proxy_next_upstream":"error timeout http_500 http_502 http_503 http_504","http_alg":"sticky_cookie"}',
    `tcp_config` TEXT NOT NULL DEFAULT '{"proxy_connect_timeout":8,"proxy_timeout":86400,"host":"127.0.0.1","port":80,"type":"tcp"}',
    `created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- http_nodes: upstream nodes for HTTP load balancing
CREATE TABLE IF NOT EXISTS `http_nodes`
(
    `id` INTEGER PRIMARY KEY AUTOINCREMENT,
    `load_id` INTEGER NOT NULL DEFAULT 0, -- load balancer id
    `node_id` INTEGER NOT NULL DEFAULT 0, -- node id
    `node_site_id` INTEGER NOT NULL DEFAULT 0, -- id of the website on the node
    `node_site_name` TEXT NOT NULL DEFAULT '', -- name of the website on the node
    `port` INTEGER NOT NULL DEFAULT 0, -- port
    `location` TEXT NOT NULL DEFAULT '/', -- route the proxy applies to; defaults to the root route '/', the only one supported in the current version
    `path` TEXT NOT NULL DEFAULT '/', -- access-verification path
    `node_status` TEXT NOT NULL DEFAULT 'online', -- node status: online, backup, down
    `weight` INTEGER NOT NULL DEFAULT 1, -- weight
    `max_fail` INTEGER NOT NULL DEFAULT 0, -- maximum number of failures
    `fail_timeout` INTEGER NOT NULL DEFAULT 0, -- failure recovery time
    `max_conns` INTEGER NOT NULL DEFAULT 0, -- maximum number of connections
    `ps` TEXT NOT NULL DEFAULT '',
    `created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- tcp_nodes: upstream nodes for TCP/UDP load balancing
CREATE TABLE IF NOT EXISTS `tcp_nodes`
(
    `id` INTEGER PRIMARY KEY AUTOINCREMENT,
    `load_id` INTEGER NOT NULL DEFAULT 0, -- load balancer id
    `node_id` INTEGER NOT NULL DEFAULT 0, -- node id
    `host` TEXT NOT NULL,
    `port` INTEGER NOT NULL DEFAULT 0,
    `node_status` TEXT NOT NULL DEFAULT 'online', -- node status: online, backup, down
    `weight` INTEGER NOT NULL DEFAULT 1,
    `max_fail` INTEGER NOT NULL DEFAULT 0,
    `fail_timeout` INTEGER NOT NULL DEFAULT 0,
    `ps` TEXT NOT NULL DEFAULT '',
    `created_at` TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX IF NOT EXISTS `load_sites_name` ON `load_sites` (`name`);
CREATE INDEX IF NOT EXISTS `load_sites_site_type` ON `load_sites` (`site_type`);
CREATE INDEX IF NOT EXISTS `http_nodes_load_id` ON `http_nodes` (`load_id`);
CREATE INDEX IF NOT EXISTS `tcp_nodes_load_id` ON `tcp_nodes` (`load_id`);
|
||||
449
mod/project/node/dbutil/load_db.py
Normal file
449
mod/project/node/dbutil/load_db.py
Normal file
@@ -0,0 +1,449 @@
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Tuple, Optional, List, Union
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class LoadSite:
    """One load-balancer site (table: load_sites).

    Validation entry points are the classmethods bind_http_load / bind_tcp_load,
    which return (LoadSite, "") on success or (None, error_message) on failure.
    """
    name: str            # unique load-balancer name
    site_name: str       # primary domain of the backing website
    site_type: str       # "http" or "tcp"
    ps: str = ''
    http_config: dict = field(default_factory=lambda: {
        "proxy_next_upstream": "error timeout http_500 http_502 http_503 http_504",
        "http_alg": "sticky_cookie",
        "proxy_cache_status": False,
        "cache_time": "1d",
        "cache_suffix": "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map",
    })
    tcp_config: dict = field(default_factory=lambda: {
        "proxy_connect_timeout": 8,
        "proxy_timeout": 86400,
        "host": "127.0.0.1",
        "port": 80,
        "type": "tcp"
    })
    created_at: int = 0
    load_id: int = 0
    site_id: int = 0

    @classmethod
    def bind_http_load(cls, data: dict) -> Tuple[Optional["LoadSite"], str]:
        """Validate *data* for an HTTP load balancer and build a LoadSite."""
        check_msg = cls.base_check(data)
        if check_msg:
            return None, check_msg
        if not data.get('site_name', None):
            return None, 'site_name is required'
        if not public.is_domain(data['site_name']):
            return None, 'site_name is invalid'
        if not isinstance(data.get('http_config', None), dict):
            return None, 'http_config is required'
        else:
            if "proxy_cache_status" not in data['http_config']:  # backward compatibility with older data
                data['http_config']["proxy_cache_status"] = False
                data['http_config']["cache_time"] = "1d"
                data['http_config']["cache_suffix"] = "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map"
            for k in ['proxy_next_upstream', 'http_alg', "proxy_cache_status", "cache_time", "cache_suffix"]:
                if k not in data['http_config']:
                    return None, 'http_config.{} is required'.format(k)
            # every token must be "error", "timeout" or an http_NNN status code
            for i in data['http_config']['proxy_next_upstream'].split():
                if i not in ('error', 'timeout') and not re.match(r'^http_\d{3}$', i):
                    return None, 'http_config.proxy_next_upstream is invalid'
            if data['http_config']['http_alg'] not in ('sticky_cookie', 'round_robin', 'least_conn', 'ip_hash'):
                return None, 'http_config.http_alg is invalid'
            if not isinstance(data['http_config']['proxy_cache_status'], bool):
                return None, 'http_config.proxy_cache_status is invalid'
            if not isinstance(data['http_config']['cache_time'], str):
                return None, 'http_config.cache_time is invalid'
            if not re.match(r"^[0-9]+([smhd])$", data['http_config']['cache_time']):
                return None, 'http_config.cache_time is invalid'
            # normalize cache_suffix: strip whitespace, drop empties, fall back to the default list
            cache_suffix = data['http_config']['cache_suffix']
            cache_suffix_list = []
            for suffix in cache_suffix.split(","):
                tmp_suffix = re.sub(r"\s", "", suffix)
                if not tmp_suffix:
                    continue
                cache_suffix_list.append(tmp_suffix)
            real_cache_suffix = ",".join(cache_suffix_list)
            if not real_cache_suffix:
                real_cache_suffix = "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map"
            data['http_config']['cache_suffix'] = real_cache_suffix

        site = LoadSite(data.get('name'), data.get('site_name'), 'http', data.get('ps', ''),
                        http_config=data.get('http_config'),
                        created_at=data.get('created_at', 0), load_id=data.get('load_id', 0),
                        site_id=data.get('site_id', 0))
        return site, ""

    @classmethod
    def base_check(cls, data) -> str:
        """Shared name validation; returns an error message or ""."""
        if not data.get('name', None):
            return 'name is required'
        # NOTE(review): the regex permits a leading digit, but the error message
        # says names cannot start with one — confirm which is intended.
        if not re.match(r'^[a-zA-Z0-9][a-zA-Z0-9_]+$', data['name']):
            return 'The name can only contain letters, numbers, underscores, and cannot start with numbers or underscores'
        if not len(data['name']) >= 3:
            return 'The length of the name cannot be less than 3 characters'
        return ""

    @classmethod
    def bind_tcp_load(cls, data: dict) -> Tuple[Optional["LoadSite"], str]:
        """Validate *data* for a TCP/UDP load balancer and build a LoadSite."""
        check_msg = cls.base_check(data)
        if check_msg:
            return None, check_msg
        if not isinstance(data.get('tcp_config', None), dict):
            return None, 'tcp_config is required'
        else:
            for k in ['proxy_connect_timeout', 'proxy_timeout', 'host', 'port', 'type']:
                if not data['tcp_config'].get(k):
                    return None, 'tcp_config.{} is required'.format(k)
            if data['tcp_config']['type'] not in ('tcp', 'udp'):
                return None, 'tcp_config.type is invalid'
            # BUGFIX: was `and`, which accepted any integer port (e.g. 999999)
            # because the isinstance clause short-circuited the range check.
            if not isinstance(data['tcp_config']['port'], int) or not 1 <= data['tcp_config']['port'] <= 65535:
                return None, 'tcp_config.port is invalid'
            if not public.check_ip(data['tcp_config']['host']):
                return None, 'tcp_config.host is invalid'

        site = LoadSite(data.get('name'), data.get('site_name'), 'tcp', ps=data.get('ps', ''),
                        tcp_config=data.get('tcp_config'),
                        created_at=data.get('created_at', 0), load_id=data.get('load_id', 0),
                        site_id=data.get('site_id', 0))
        return site, ""

    def to_dict(self) -> dict:
        """Plain-dict form of this site, suitable for DB insert/update."""
        return {
            "name": self.name,
            "site_name": self.site_name,
            "site_type": self.site_type,
            "ps": self.ps,
            "http_config": self.http_config,
            "tcp_config": self.tcp_config,
            "created_at": self.created_at,
            "load_id": self.load_id,
            "site_id": self.site_id
        }
|
||||
|
||||
|
||||
@dataclass
class HttpNode:
    """One upstream node of an HTTP load balancer (table: http_nodes)."""
    node_id: int
    node_site_name: str   # domain or IP of the website on the node
    port: int
    location: str = "/"
    path: str = "/"
    node_status: str = "online"  # online, backup, down
    weight: int = 1
    max_fail: int = 3
    fail_timeout: int = 600
    ps: str = ""
    created_at: int = 0
    node_site_id: int = 0
    id: int = 0
    load_id: int = 0

    @classmethod
    def bind(cls, data: dict) -> Tuple[Optional["HttpNode"], str]:
        """Validate *data* and build an HttpNode; returns (node, error)."""
        site_name = data.get('node_site_name', None)
        if not isinstance(site_name, str):
            return None, 'node_site_name is required'
        if not public.is_domain(site_name) and not public.check_ip(site_name):
            return None, 'node_site_name is invalid'
        port = data.get('port', None)
        if not isinstance(port, int):
            return None, 'port is required'
        if not 1 <= port <= 65535:
            return None, 'port is invalid'
        if not isinstance(data.get('node_id', None), int):
            return None, 'node_id is required'
        status = data.get('node_status', None)
        if not isinstance(status, str):
            return None, 'node_status is required'
        if status not in ('online', 'backup', 'down'):
            return None, 'node_status is invalid'

        node = HttpNode(
            node_id=data.get('node_id'),
            node_site_name=site_name,
            port=port,
            location="/",
            path=data.get('path', "/"),
            node_status=data.get('node_status', "online"),
            weight=data.get('weight', 1),
            max_fail=data.get('max_fail', 3),
            fail_timeout=data.get('fail_timeout', 600),
            ps=data.get('ps', ''),
            created_at=data.get('created_at', 0),
            node_site_id=data.get('node_site_id', 0),
            id=data.get('id', 0),
            load_id=data.get('load_id', 0),
        )
        return node, ""

    def to_dict(self) -> dict:
        """Plain-dict form of this node, suitable for DB insert/update."""
        return dict(
            node_id=self.node_id,
            node_site_name=self.node_site_name,
            port=self.port,
            location=self.location,
            path=self.path,
            node_status=self.node_status,
            weight=self.weight,
            max_fail=self.max_fail,
            fail_timeout=self.fail_timeout,
            ps=self.ps,
            created_at=self.created_at,
            node_site_id=self.node_site_id,
            id=self.id,
            load_id=self.load_id,
        )
|
||||
|
||||
|
||||
@dataclass
class TcpNode:
    """One upstream node of a TCP/UDP load balancer (table: tcp_nodes)."""
    node_id: int
    host: str
    port: int
    id: int = 0
    load_id: int = 0
    node_status: str = "online"  # online, backup, down
    weight: int = 1
    max_fail: int = 3
    fail_timeout: int = 600
    ps: str = ""
    created_at: int = 0

    @classmethod
    def bind(cls, data: dict) -> Tuple[Optional["TcpNode"], str]:
        """Validate *data* and build a TcpNode; returns (node, error)."""
        status = data.get('node_status', None)
        if not isinstance(status, str):
            return None, 'node_status is required'
        if status not in ('online', 'backup', 'down'):
            return None, 'node_status is invalid'
        if not isinstance(data.get('host', None), str):
            return None, 'host is required'
        if not isinstance(data.get('node_id', None), int):
            return None, 'node_id is required'
        port = data.get('port', None)
        if not isinstance(port, int):
            return None, 'port is required'
        if not 1 <= port <= 65535:
            return None, 'port is invalid'

        node = TcpNode(
            node_id=data.get('node_id'),
            host=data.get('host'),
            port=port,
            id=data.get('id', 0),
            load_id=data.get('load_id', 0),
            node_status=data.get('node_status', "online"),
            weight=data.get('weight', 1),
            max_fail=data.get('max_fail', 3),
            fail_timeout=data.get('fail_timeout', 600),
            ps=data.get('ps', ''),
            created_at=data.get('created_at', 0),
        )
        return node, ""

    def to_dict(self) -> dict:
        """Plain-dict form of this node, suitable for DB insert/update."""
        return dict(
            node_id=self.node_id,
            host=self.host,
            port=self.port,
            id=self.id,
            load_id=self.load_id,
            node_status=self.node_status,
            weight=self.weight,
            max_fail=self.max_fail,
            fail_timeout=self.fail_timeout,
            ps=self.ps,
            created_at=self.created_at,
        )
|
||||
|
||||
|
||||
class NodeDB:
    """SQLite access layer for the load-balancer database.

    Owns node_load_balance.db and the tables created by load_balancer.sql
    (load_sites, http_nodes, tcp_nodes). The underlying db.Sql layer reports
    failures by returning an error string (or setting ERR_INFO) rather than
    raising, so most methods propagate those strings to the caller.
    """
    # Dedicated database file, separate from the main panel database.
    _DB_FILE = public.get_panel_path() + "/data/db/node_load_balance.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/load_balancer.sql"

    def __init__(self):
        # db.Sql defaults to the panel DB; repoint it at our own file through
        # the name-mangled private attribute of class Sql.
        sql = db.Sql()
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql

    def init_db(self):
        """Create the DB file (if missing) and apply the schema script.

        Safe to call repeatedly: the script uses CREATE ... IF NOT EXISTS.
        """
        sql_data = public.readFile(self._DB_INIT_FILE)
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
        import sqlite3
        conn = sqlite3.connect(self._DB_FILE)
        c = conn.cursor()
        c.executescript(sql_data)
        conn.commit()
        conn.close()

    def close(self):
        """Release the underlying database connection."""
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_trackback):
        self.close()

    def __del__(self):
        self.close()

    def update_load_key(self, load_id: int, load_data: dict) -> str:
        """Partially update a load_sites row; returns "" or an error message."""
        if not isinstance(load_id, int):
            return "load_id is required"
        if not isinstance(load_data, dict):
            return "load_data is required"
        err = self.db.table("load_sites").where("load_id = ?", load_id).update(load_data)
        if isinstance(err, str):
            return err
        return ""

    def name_exist(self, name: str) -> bool:
        """True when a load balancer with this name already exists."""
        return self.db.table("load_sites").where("name = ?", name).count() > 0

    def load_site_name_exist(self, name: str) -> bool:
        """True when a load balancer already uses this site domain."""
        return self.db.table("load_sites").where("site_name = ?", name).count() > 0

    def load_id_exist(self, load_id: int) -> bool:
        """True when the given load_id exists."""
        return self.db.table("load_sites").where("load_id = ?", load_id).count() > 0

    def loads_count(self, site_type: str, query: str = "") -> int:
        """Count load balancers of a type, optionally filtered by remark (ps)."""
        if site_type == "http":
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "http").count()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("http", "%" + query + "%")).count()
        else:
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "tcp").count()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("tcp", "%" + query + "%")).count()

    def loads_list(self, site_type: str, offset: int, limit: int, query: str = ""):
        """Page through load balancers ("all", "http" or "tcp"), optionally filtered by remark."""
        if site_type == "all":
            if query:
                return self.db.table("load_sites").where("ps like ?", "%" + query + "%").limit(limit, offset).select()
            return self.db.table("load_sites").limit(limit, offset).select()
        if site_type == "http":
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "http").limit(limit, offset).select()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("http", "%" + query + "%")).limit(limit, offset).select()
        else:
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "tcp").limit(limit, offset).select()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("tcp", "%" + query + "%")).limit(limit, offset).select()

    def create_load(self, site_type: str, load: LoadSite, nodes: List[Union[HttpNode, TcpNode]]) -> str:
        """Insert a load-balancer site plus its upstream nodes.

        On success load.load_id is filled with the new rowid. Returns "" on
        success, otherwise an error message. NOTE(review): there is no
        transaction, so a node insert failing leaves the site row behind.
        """
        load_data = load.to_dict()
        load_data.pop('load_id')
        load_data.pop('created_at')
        # the configs are dicts on the object but stored as JSON text
        load_data["http_config"] = json.dumps(load.http_config)
        load_data["tcp_config"] = json.dumps(load.tcp_config)
        try:
            err = self.db.table("load_sites").insert(load_data)
            if isinstance(err, str):
                return err
            # on success insert() returns the new rowid
            load.load_id = err

            for node in nodes:
                node_data = node.to_dict()
                node_data.pop('id')
                node_data.pop('created_at')
                node_data['load_id'] = load.load_id
                if site_type == "http" and isinstance(node, HttpNode):
                    err = self.db.table("http_nodes").insert(node_data)
                else:
                    err = self.db.table("tcp_nodes").insert(node_data)
                if isinstance(err, str):
                    return err
        except Exception as e:
            return "数据库操作错误:" + str(e)

        return ""

    def update_load(self, site_type: str, load: LoadSite, nodes: List[Union[HttpNode, TcpNode]]) -> str:
        """Update a load-balancer site and reconcile its node list.

        Existing nodes (matched by id) are updated, new ones inserted, and
        nodes absent from *nodes* are deleted. Returns "" or an error message.
        """
        load_data = load.to_dict()
        if not load.load_id:
            return "load_id is required"
        load_data.pop('created_at')
        load_data.pop('load_id')
        load_data["http_config"] = json.dumps(load.http_config)
        load_data["tcp_config"] = json.dumps(load.tcp_config)

        try:
            err = self.db.table("load_sites").where("load_id = ?", load.load_id).update(load_data)
            if isinstance(err, str):
                return err
        except Exception as e:
            return "数据库操作错误:" + str(e)

        # index the current nodes by id so we can diff against the new list
        old_nodes, err = self.get_nodes(load.load_id, site_type)
        if err:
            return err
        old_nodes_map = {}
        for old_node in old_nodes:
            old_nodes_map[old_node['id']] = old_node

        try:
            for node in nodes:
                node_data = node.to_dict()
                node_data.pop('id')
                node_data.pop('created_at')
                node_data['load_id'] = load.load_id
                if node.id in old_nodes_map:
                    # known node: update in place and drop it from the map
                    if site_type == "http" and isinstance(node, HttpNode):
                        err = self.db.table("http_nodes").where("id = ?", node.id).update(node_data)
                    else:
                        err = self.db.table("tcp_nodes").where("id = ?", node.id).update(node_data)
                    if isinstance(err, str):
                        return err
                    old_nodes_map.pop(node.id)
                else:
                    # new node: insert
                    if site_type == "http" and isinstance(node, HttpNode):
                        err = self.db.table("http_nodes").insert(node_data)
                    else:
                        err = self.db.table("tcp_nodes").insert(node_data)
                    if isinstance(err, str):
                        return err
            # whatever is left in the map no longer exists in the new list
            for node_id in old_nodes_map:
                if site_type == "http":
                    err = self.db.table("http_nodes").where("id = ?", node_id).delete()
                else:
                    err = self.db.table("tcp_nodes").where("id = ?", node_id).delete()
                if isinstance(err, str):
                    return err
        except Exception as e:
            return "数据库操作错误:" + str(e)
        return ""

    def get_nodes(self, load_id: int, site_type: str) -> Tuple[List[dict], str]:
        """All upstream node rows of a load balancer; ([], error) on failure."""
        if site_type == "http":
            nodes: List[dict] = self.db.table("http_nodes").where("load_id = ?", load_id).select()
        else:
            nodes: List[dict] = self.db.table("tcp_nodes").where("load_id = ?", load_id).select()
        if isinstance(nodes, str):
            return [], nodes
        if not nodes and self.db.ERR_INFO:
            return [], self.db.ERR_INFO
        return nodes, ""

    def get_load(self, load_id: int) -> Tuple[Optional[dict], str]:
        """Fetch one load_sites row; (None, error) when missing or on failure."""
        load_data = self.db.table("load_sites").where("load_id = ?", load_id).find()
        if isinstance(load_data, str):
            return None, load_data
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        if len(load_data) == 0:
            return None, "未查询到该负载配置"
        return load_data, ""

    def delete(self, load_id: int) -> str:
        """Delete a load balancer and its nodes; "" on success (or if absent)."""
        load_data = self.db.table("load_sites").where("load_id = ?", load_id).find()
        if isinstance(load_data, str):
            return load_data
        if self.db.ERR_INFO:
            return self.db.ERR_INFO
        if len(load_data) == 0:
            return ""

        # remove the node rows first, then the site row itself
        if load_data["site_type"] == "http":
            err = self.db.table("http_nodes").where("load_id = ?", load_id).delete()
        else:
            err = self.db.table("tcp_nodes").where("load_id = ?", load_id).delete()
        if isinstance(err, str):
            return err
        err = self.db.table("load_sites").where("load_id = ?", load_id).delete()
        if isinstance(err, str):
            return err
        return ""
|
||||
28
mod/project/node/dbutil/node.sql
Normal file
28
mod/project/node/dbutil/node.sql
Normal file
@@ -0,0 +1,28 @@
|
||||
-- node: remote (and the built-in local) panel nodes
CREATE TABLE IF NOT EXISTS `node`
(
    `id` INTEGER PRIMARY KEY AUTOINCREMENT,
    `address` VARCHAR, -- node address, e.g. https://xxx:xx/
    `category_id` INTEGER, -- category
    `remarks` VARCHAR, -- node display name
    `api_key` VARCHAR, -- api key
    `create_time` INTEGER DEFAULT (0), -- creation time
    `server_ip` TEXT, -- server ip
    `status` INTEGER, -- 0: offline 1: online
    `error` TEXT DEFAULT '{}',
    `error_num` INTEGER DEFAULT 0,
    `app_key` TEXT, -- app key
    `ssh_conf` TEXT NOT NULL DEFAULT '{}',
    `ssh_test` INTEGER DEFAULT 0, -- whether an ssh key test has been performed, 0: not tested 1: tested
    `lpver` TEXT DEFAULT '' -- 1panel version; when the target panel is 1panel, records whether it is v1 or v2
);

-- category: node grouping
CREATE TABLE IF NOT EXISTS `category`
(
    `id` INTEGER PRIMARY KEY AUTOINCREMENT,
    `name` VARCHAR,
    `create_time` INTEGER DEFAULT (0)
);

-- Seed the built-in local node exactly once (idempotent insert).
INSERT INTO `node` (app_key, api_key, remarks, server_ip)
SELECT 'local', 'local', 'Local node', '127.0.0.1'
WHERE NOT EXISTS (SELECT 1 FROM `node` WHERE app_key = 'local' AND api_key = 'local');
|
||||
462
mod/project/node/dbutil/node_db.py
Normal file
462
mod/project/node/dbutil/node_db.py
Normal file
@@ -0,0 +1,462 @@
|
||||
import base64
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
import sys
|
||||
from urllib.parse import urlparse
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Tuple, Optional, List, Union, Dict
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class NodeAPPKey:
    """Decoded parts of a panel app_key credential."""
    origin: str         # panel origin, e.g. https://host:port
    request_token: str
    app_key: str
    app_token: str

    def to_string(self) -> str:
        """Re-encode the four fields into the base64("a|b|c|d") wire format."""
        parts = (self.origin, self.request_token, self.app_key, self.app_token)
        raw = "|".join(parts).encode()
        return base64.b64encode(raw).decode("utf-8")
|
||||
|
||||
|
||||
@dataclass
class Node:
    """One managed panel node (table: node).

    A node is reachable through exactly one (or more) of: api_key + address,
    an app_key blob (which embeds the address), or an ssh_conf dict.
    """
    remarks: str                 # display name, unique across nodes
    id: int = 0
    address: str = ""
    category_id: int = 0
    api_key: str = ""
    create_time: int = 0
    server_ip: str = ""
    status: int = 1              # 0: offline, 1: online
    error: dict = field(default_factory=dict)
    error_num: int = 0
    app_key: str = ""
    ssh_conf: dict = field(default_factory=dict)
    lpver: str = ""              # 1panel version marker (v1/v2), when applicable

    @classmethod
    def from_dict(cls, data: dict) -> Tuple[Optional["Node"], str]:
        """Validate *data* and build a Node; returns (node, error_message)."""
        if not isinstance(data.get('remarks', None), str):
            return None, 'remarks is required'
        if not data["remarks"].strip():
            return None, 'remarks is required'
        data["remarks"] = data["remarks"].strip()

        api_key = data.get('api_key', '')
        app_key = data.get('app_key', '')
        ssh_conf: dict = data.get('ssh_conf', {})
        if not api_key and not app_key and not ssh_conf:
            return None, 'api_key or app_key or ssh_conf is required'

        if app_key:
            # the app_key blob embeds the panel origin; it overrides address
            app = cls.parse_app_key(app_key)
            if not app:
                return None, 'App_key format error'
            data["address"] = app.origin
            url = urlparse(data["address"], allow_fragments=False)
            if not url.scheme or not url.netloc:
                return None, 'address is invalid'

        if api_key:
            if not isinstance(data.get('address', None), str):
                return None, 'address is required'
            url = urlparse(data["address"], allow_fragments=False)
            if not url.scheme or not url.netloc:
                return None, 'address is invalid'

        if ssh_conf:
            for key in ("host", "port"):
                if key not in ssh_conf:
                    return None, 'ssh_conf is invalid'
            # fill in the optional ssh fields with safe defaults
            if "username" not in ssh_conf:
                ssh_conf["username"] = "root"
            if "password" not in ssh_conf:
                ssh_conf["password"] = ""
            if "pkey" not in ssh_conf:
                ssh_conf["pkey"] = ""
            if "pkey_passwd" not in ssh_conf:
                ssh_conf["pkey_passwd"] = ""

        if ssh_conf and not data.get("address", None):
            data["address"] = ssh_conf["host"]

        n = Node(
            data["remarks"], id=data.get('id', 0), address=data.get("address"), category_id=int(data.get('category_id', 0)),
            api_key=api_key, create_time=data.get('create_time', 0), server_ip=data.get('server_ip', ''),
            status=data.get('status', 1), error=data.get('error', {}), error_num=data.get('error_num', 0),
            app_key=app_key, ssh_conf=ssh_conf, lpver=data.get('lpver', '')
        )
        return n, ''

    def to_dict(self) -> dict:
        """Plain-dict form of this node, suitable for DB insert/update."""
        return {
            "remarks": self.remarks,
            "id": self.id,
            "address": self.address,
            "category_id": self.category_id,
            "api_key": self.api_key,
            "create_time": self.create_time,
            "server_ip": self.server_ip,
            "status": self.status,
            "error": self.error,
            "error_num": self.error_num,
            "app_key": self.app_key,
            "ssh_conf": self.ssh_conf,
            "lpver": self.lpver
        }

    def parse_server_ip(self):
        """Resolve this node's address to an IP; "" when resolution fails."""
        import socket
        from urllib.parse import urlparse
        if not self.address.startswith("http"):
            host = self.address  # SSH-only nodes store the bare host in address
        else:
            host = urlparse(self.address).hostname
        if isinstance(host, str) and public.check_ip(host):
            return host
        try:
            ip_address = socket.gethostbyname(host)
            return ip_address
        except socket.gaierror as e:
            public.print_log(f"Error: {e}")
            return ""

    @staticmethod
    def parse_app_key(app_key: str) -> Optional["NodeAPPKey"]:
        """Decode a base64 "origin|request_token|app_key|app_token" blob.

        Returns None on any malformed input (bad base64, wrong field count).
        """
        try:
            data = base64.b64decode(app_key).decode("utf-8")
            origin, request_token, app_key, app_token = data.split("|")
            # keep at most scheme://host:port — drop anything past a 3rd colon
            origin_arr = origin.split(":")
            if len(origin_arr) > 3:
                origin = ":".join(origin_arr[:3])
            return NodeAPPKey(origin, request_token, app_key, app_token)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            return None
|
||||
|
||||
|
||||
class ServerNodeDB:
|
||||
_DB_FILE = public.get_panel_path() + "/data/db/node.db"
|
||||
_DB_INIT_FILE = os.path.dirname(__file__) + "/node.sql"
|
||||
|
||||
def __init__(self):
|
||||
sql = db.Sql()
|
||||
sql._Sql__DB_FILE = self._DB_FILE
|
||||
self.db = sql
|
||||
|
||||
def init_db(self):
|
||||
sql_data = public.readFile(self._DB_INIT_FILE)
|
||||
import sqlite3
|
||||
conn = sqlite3.connect(self._DB_FILE)
|
||||
cur = conn.cursor()
|
||||
cur.executescript(sql_data)
|
||||
cur.execute("PRAGMA table_info(node)")
|
||||
existing_cols = [row[1] for row in cur.fetchall()]
|
||||
if "ssh_test" in existing_cols:
|
||||
pass
|
||||
# print("字段 ssh_test 已存在")
|
||||
else:
|
||||
cur.execute("ALTER TABLE node ADD COLUMN ssh_test INTEGER DEFAULT (0)")
|
||||
conn.commit()
|
||||
conn.close()
|
||||
|
||||
    def close(self):
        """Release the underlying database connection."""
        self.db.close()
|
||||
|
||||
    def __enter__(self):
        # context-manager support: `with ServerNodeDB() as ndb: ...`
        return self
|
||||
|
||||
    def __exit__(self, exc_type, exc_value, exc_trackback):
        # close the connection on context exit regardless of exceptions
        self.close()
|
||||
|
||||
def __del__(self):
|
||||
self.close()
|
||||
|
||||
def is_local_node(self, node_id: int):
|
||||
return self.db.table('node').where("id=? AND app_key = 'local' AND api_key = 'local'", (node_id,)).count() > 0
|
||||
|
||||
def get_local_node(self):
|
||||
data = self.db.table('node').where("app_key = 'local' AND api_key = 'local'", ()).find()
|
||||
if isinstance(data, dict):
|
||||
return data
|
||||
return {
|
||||
"id": 0,
|
||||
"address": "",
|
||||
"category_id": 0,
|
||||
"remarks": "Local node",
|
||||
"api_key": "local",
|
||||
"create_time": time.strftime('%Y-%m-%d %H:%M:%S'),
|
||||
"server_ip": "127.0.0.1",
|
||||
"status": 0,
|
||||
"error": 0,
|
||||
"error_num": 0,
|
||||
"app_key": "local",
|
||||
"ssh_conf": "{}",
|
||||
"lpver": "",
|
||||
}
|
||||
|
||||
def create_node(self, node: Node) -> str:
|
||||
node_data = node.to_dict()
|
||||
node_data.pop("id")
|
||||
node_data["create_time"] = time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
node_data.pop("error")
|
||||
node_data["status"] = 1
|
||||
node_data["ssh_conf"] = json.dumps(node_data["ssh_conf"])
|
||||
|
||||
if node.category_id > 0 and not self.category_exites(node.category_id):
|
||||
return "Classification does not exist"
|
||||
|
||||
if self.db.table('node').where('remarks=?', (node.remarks,)).count() > 0:
|
||||
return "The node with this name already exists"
|
||||
try:
|
||||
node_id = self.db.table('node').insert(node_data)
|
||||
if isinstance(node_id, int):
|
||||
node.id = node_id
|
||||
return ""
|
||||
elif isinstance(node_id, str):
|
||||
return node_id
|
||||
else:
|
||||
return str(node_id)
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
def update_node(self, node: Node, with_out_fields: List[str] = Node) -> str:
|
||||
if self.is_local_node(node.id):
|
||||
return "Cannot modify local nodes"
|
||||
if not self.node_id_exites(node.id):
|
||||
return "Node does not exist"
|
||||
node_data = node.to_dict()
|
||||
node_data.pop("create_time")
|
||||
node_data.pop("id")
|
||||
node_data["ssh_conf"] = json.dumps(node_data["ssh_conf"])
|
||||
node_data["error"] = json.dumps(node_data["error"])
|
||||
if with_out_fields and isinstance(with_out_fields, list):
|
||||
for f in with_out_fields:
|
||||
if f in node_data:
|
||||
node_data.pop(f)
|
||||
|
||||
if node.category_id > 0 and not self.category_exites(node.category_id):
|
||||
node.category_id = 0
|
||||
node_data["category_id"] = 0
|
||||
try:
|
||||
res = self.db.table('node').where('id=?', (node.id,)).update(node_data)
|
||||
if isinstance(res, str):
|
||||
return res
|
||||
except Exception as e:
|
||||
return str(e)
|
||||
|
||||
return ""
|
||||
|
||||
def set_node_ssh_conf(self, node_id: int, ssh_conf: dict, ssh_test: int = 0):
    """Persist a node's SSH configuration; optionally mark it as tested."""
    fields = {"ssh_conf": json.dumps(ssh_conf)}
    if ssh_test:
        fields["ssh_test"] = 1
    self.db.table('node').where('id=?', (node_id,)).update(fields)
    return
|
||||
|
||||
def remove_node_ssh_conf(self, node_id: int):
    """Reset the node's stored SSH configuration to an empty JSON object."""
    empty_conf = {"ssh_conf": "{}"}
    self.db.table('node').where('id=?', (node_id,)).update(empty_conf)
    return
|
||||
|
||||
def delete_node(self, node_id: int) -> str:
    """Delete a node row; "" on success, otherwise an error message."""
    if self.is_local_node(node_id):
        return "Cannot delete local node"
    if not self.node_id_exites(node_id):
        return "Node does not exist"
    try:
        outcome = self.db.table('node').where('id=?', (node_id,)).delete()
    except Exception as exc:
        return str(exc)
    # the db layer reports failures as strings
    return outcome if isinstance(outcome, str) else ""
|
||||
|
||||
def find_node(self, api_key: str = "", app_key: str = "") -> Optional[dict]:
    """Find a single node by its api_key or app_key.

    Fix: the previous query bound TWO parameters to a SINGLE `?`
    placeholder ('api_key=?', (api_key, app_key)), which raises an
    "incorrect number of bindings" error in sqlite. Both parameters were
    clearly intended to participate, so match on either column.
    NOTE(review): OR semantics inferred from the signature — confirm
    against the callers of find_node.
    """
    res = self.db.table('node').where('api_key=? or app_key=?', (api_key, app_key)).find()
    if isinstance(res, dict):
        return res
    return None
|
||||
|
||||
def get_node_list(self,
                  search: str = "",
                  category_id: int = -1,
                  offset: int = 0,
                  limit: int = 10) -> Tuple[List[Dict], str]:
    """Page through nodes, optionally filtered by remark substring and category.

    category_id < 0 means "any category". Returns (rows, "") on success,
    or ([], error_message) on failure.
    """
    try:
        clauses, params = [], []
        if search:
            clauses.append("remarks like ?")
            params.append('%{}%'.format(search))
        if category_id >= 0:
            clauses.append("category_id=?")
            params.append(category_id)

        query = self.db.table('node')
        if clauses:
            query = query.where(" and ".join(clauses), params)
        rows = query.order('id desc').limit(limit, offset).select()

        if self.db.ERR_INFO:
            return [], self.db.ERR_INFO
        if not isinstance(rows, list):
            return [], str(rows)
        return rows, ""
    except Exception as exc:
        return [], str(exc)
|
||||
|
||||
def query_node_list(self, *args) -> List[Dict]:
    """Run an arbitrary where-clause query against the node table.

    *args is forwarded verbatim to the db layer's where() — typically a
    condition string followed by its bound parameters.
    """
    return self.db.table('node').where(*args).select()
|
||||
|
||||
def category_exites(self, category_id: int) -> bool:
    """Return True when a category row with this id exists.

    (The misspelled name — "exites" — is kept; callers depend on it.)
    """
    return self.db.table('category').where('id=?', (category_id,)).count() > 0
|
||||
|
||||
def node_id_exites(self, node_id: int) -> bool:
    """Return True when a node row with this id exists.

    (The misspelled name — "exites" — is kept; callers depend on it.)
    """
    return self.db.table('node').where('id=?', (node_id,)).count() > 0
|
||||
|
||||
def category_map(self) -> Dict:
    """Map category id -> name; id 0 (the default category) is always present."""
    mapping = {0: "Default classification"}
    rows = self.db.table('category').field('id,name').select()
    if isinstance(rows, list):
        mapping.update({row["id"]: row["name"] for row in rows})
    return mapping
|
||||
|
||||
def node_map(self) -> Dict:
    """Map node id -> remark for every stored node; {} when the query fails."""
    rows = self.db.table('node').field('id,remarks').select()
    if not isinstance(rows, list):
        return {}
    return {row["id"]: row["remarks"] for row in rows}
|
||||
|
||||
def create_category(self, name: str) -> str:
    """Create a node category; "" on success, otherwise an error message."""
    name_taken = self.db.table('category').where('name=?', (name,)).count() > 0
    if name_taken:
        return "The classification for this name already exists"
    row = {"name": name, "create_time": time.strftime('%Y-%m-%d %H:%M:%S')}
    try:
        outcome = self.db.table('category').insert(row)
    except Exception as exc:
        return str(exc)
    # the db layer reports failures as strings
    return outcome if isinstance(outcome, str) else ""
|
||||
|
||||
def delete_category(self, category_id: int):
    """Drop a category, moving its nodes back to the default category (0) first."""
    self.db.table('node').where('category_id=?', (category_id,)).update({"category_id": 0})
    self.db.table('category').where('id=?', (category_id,)).delete()
|
||||
|
||||
def bind_category_to_node(self, node_id: List[int], category_id: int) -> str:
    """Assign category_id to every node in node_id.

    Returns "" on success, otherwise an error message. category_id 0
    (the default category) is always accepted.
    """
    if not node_id:
        return "Node ID cannot be empty"
    if category_id > 0 and not self.category_exites(category_id):
        return "Classification does not exist"

    placeholders = ",".join("?" * len(node_id))
    try:
        outcome = self.db.table('node').where(
            'id in ({})'.format(placeholders), tuple(node_id)
        ).update({"category_id": category_id})
    except Exception as exc:
        return str(exc)
    # the db layer reports failures as strings
    return outcome if isinstance(outcome, str) else ""
|
||||
|
||||
def node_count(self, search, category_id) -> int:
    """Count nodes matching the same filters as get_node_list.

    search: substring matched against remarks (ignored when empty).
    category_id: exact category filter; negative means "any category".
    Returns 0 on any query failure.
    """
    try:
        clauses, params = [], []
        if search:
            clauses.append("remarks like ?")
            params.append('%{}%'.format(search))
        if category_id >= 0:
            clauses.append("category_id=?")
            params.append(category_id)

        query = self.db.table('node')
        if clauses:
            query = query.where(" and ".join(clauses), params)
        # the previous order('id desc') was dead weight for a count()
        return query.count()
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt etc. still propagate
        return 0
|
||||
|
||||
def get_node_by_id(self, node_id: int) -> Optional[Dict]:
    """Fetch a single node row by primary key.

    Returns the row dict, or None when the node is absent or the query
    fails for any reason.
    """
    try:
        data = self.db.table('node').where('id=?', (node_id,)).find()
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt etc. still propagate
        return None
    if self.db.ERR_INFO:
        return None
    return data if isinstance(data, dict) else None
|
||||
|
||||
class ServerMonitorRepo:
    """File-based cache of per-server monitoring state.

    Stores status snapshots ("server_<id>.json") and reboot-wait markers
    ("wait_reboot_<ip>") under the panel data directory.
    """
    _REPO_DIR = public.get_panel_path() + "/data/mod_node_status_cache/"

    def __init__(self):
        # exist_ok avoids a race when two processes create the dir at once
        os.makedirs(self._REPO_DIR, exist_ok=True)

    def _wait_file(self, server_ip: str) -> str:
        # marker file whose presence means "this server is rebooting"
        return os.path.join(self._REPO_DIR, "wait_reboot_{}".format(server_ip))

    def _cache_file(self, server_id: int) -> str:
        # per-server JSON status snapshot
        return os.path.join(self._REPO_DIR, "server_{}.json".format(server_id))

    def set_wait_reboot(self, server_ip: str, start: bool):
        """Create (start=True) or clear (start=False) the reboot-wait marker."""
        wait_file = self._wait_file(server_ip)
        if start:
            return public.writeFile(wait_file, "wait_reboot")
        else:
            if os.path.exists(wait_file):
                os.remove(wait_file)

    def is_reboot_wait(self, server_ip: str):
        """True while the reboot marker exists and is younger than ~10 minutes."""
        wait_file = self._wait_file(server_ip)
        # a reboot wait older than 10 minutes is considered timed out
        return os.path.exists(wait_file) and os.path.getmtime(wait_file) > time.time() - 610

    @staticmethod
    def get_local_server_status():
        """Live status of the local machine via the panel's system module."""
        from system import system
        return system().GetNetWork(None)

    def get_server_status(self, server_id: int) -> Optional[Dict]:
        """Return the cached status dict, or None when missing, stale or unreadable.

        Entries older than 5 minutes are deleted and treated as missing.
        """
        cache_file = self._cache_file(server_id)
        if not os.path.exists(cache_file):
            return None

        mtime = os.path.getmtime(cache_file)
        if time.time() - mtime > 60 * 5:
            os.remove(cache_file)
            return None
        try:
            data = public.readFile(cache_file)
            if isinstance(data, str):
                return json.loads(data)
        except Exception:
            # narrowed from a bare `except:`; malformed cache reads as "no data"
            return None
        return None

    def save_server_status(self, server_id: int, data: Dict) -> str:
        """Persist a status snapshot; "" on success, else the error text."""
        try:
            public.writeFile(self._cache_file(server_id), json.dumps(data))
            return ""
        except Exception as e:
            return str(e)

    def remove_cache(self, server_id: int):
        """Delete the cached snapshot for one server, if present."""
        cache_file = self._cache_file(server_id)
        if os.path.exists(cache_file):
            os.remove(cache_file)
|
||||
1149
mod/project/node/dbutil/node_task_flow.py
Normal file
1149
mod/project/node/dbutil/node_task_flow.py
Normal file
File diff suppressed because it is too large
Load Diff
144
mod/project/node/dbutil/node_task_flow.sql
Normal file
144
mod/project/node/dbutil/node_task_flow.sql
Normal file
@@ -0,0 +1,144 @@
|
||||
-- Scripts table: reusable user scripts (type and content)
CREATE TABLE IF NOT EXISTS scripts
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK (length(name) <= 255),
    script_type TEXT NOT NULL CHECK (length(script_type) <= 255),
    content TEXT NOT NULL,
    description TEXT,
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Task-flow table
CREATE TABLE IF NOT EXISTS flows
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    server_ids TEXT NOT NULL, -- stores the list of server IDs
    step_count INTEGER NOT NULL,
    strategy TEXT NOT NULL, -- handling strategy for the different tasks, JSON field
    status TEXT NOT NULL, -- overall status: waiting, running, complete, error
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Command-line task table
CREATE TABLE IF NOT EXISTS command_tasks
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id INTEGER NOT NULL,
    name TEXT NOT NULL CHECK (length(name) <= 255),
    step_index INTEGER NOT NULL,
    script_id INTEGER NOT NULL,
    script_content TEXT NOT NULL,
    script_type TEXT NOT NULL CHECK (length(script_type) <= 255),
    status INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3)), -- 0: waiting, 1: running, 2: success, 3: failed
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Command-line task log table
CREATE TABLE IF NOT EXISTS command_logs
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    command_task_id INTEGER NOT NULL,
    server_id INTEGER NOT NULL,
    ssh_host TEXT NOT NULL,
    status INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3, 4)), -- 0: waiting, 1: running, 2: success, 3: failed, 4: exception
    log_name TEXT NOT NULL CHECK (length(log_name) <= 255)
);

-- Transfer task table
CREATE TABLE IF NOT EXISTS transfer_tasks
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK (length(name) <= 255),
    flow_id INTEGER NOT NULL, -- 0 when this machine is not the data-source node
    step_index INTEGER NOT NULL,
    src_node TEXT NOT NULL, -- data-source node, JSON field
    src_node_task_id INTEGER NOT NULL, -- 0 when this machine is the data-source node, otherwise the transfer_tasks.id on the target machine
    dst_nodes TEXT NOT NULL, -- target nodes (several), JSON field
    message TEXT NOT NULL DEFAULT '', -- connection error info for the target nodes
    path_list TEXT NOT NULL DEFAULT '[]', -- paths on the source node [{"path":"/www/wwwroots", "is_dir":true}]
    status INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3)), -- 0: waiting, 1: running, 2: success, 3: failed
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Transferred-file list
CREATE TABLE IF NOT EXISTS transfer_files
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id INTEGER NOT NULL,
    transfer_task_id INTEGER NOT NULL,
    src_file TEXT NOT NULL, -- source file
    dst_file TEXT NOT NULL, -- destination file
    file_size INTEGER NOT NULL, -- file size
    is_dir INTEGER NOT NULL DEFAULT 0
);


-- Transfer log table (original comment duplicated "transfer file list")
CREATE TABLE IF NOT EXISTS transfer_logs
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id INTEGER NOT NULL,
    transfer_task_id INTEGER NOT NULL,
    transfer_file_id INTEGER NOT NULL,
    dst_node_idx INTEGER NOT NULL, -- index of the target node, based on transfer_tasks.dst_nodes
    status INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3, 4)), -- 0: waiting, 1: running, 2: success, 3: failed, 4: skipped
    progress INTEGER DEFAULT 0, -- 0-100
    message TEXT NOT NULL DEFAULT '',
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    started_at INTEGER,
    completed_at INTEGER
);

-- Flow template table
CREATE TABLE IF NOT EXISTS flow_templates
(
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK (length(name) <= 255),
    key_words TEXT NOT NULL DEFAULT '', -- search keywords; the content is the names of the sub-tasks
    description TEXT NOT NULL DEFAULT '', -- template description
    content TEXT NOT NULL, -- JSON field built by the frontend: the actual flow content
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

CREATE INDEX IF NOT EXISTS idx_scripts_name ON scripts (name);
CREATE INDEX IF NOT EXISTS idx_scripts_description ON scripts (description);

CREATE INDEX IF NOT EXISTS idx_flow_server_ids ON flows (server_ids);

-- command_tasks table
CREATE INDEX IF NOT EXISTS idx_command_tasks_flow_id ON command_tasks (flow_id);
CREATE INDEX IF NOT EXISTS idx_command_tasks_script_id ON command_tasks (script_id);

-- command_logs table
CREATE INDEX IF NOT EXISTS idx_command_logs_task_id ON command_logs (command_task_id);
CREATE INDEX IF NOT EXISTS idx_command_logs_server_id ON command_logs (server_id);
-- command_logs status lookups
CREATE INDEX IF NOT EXISTS idx_command_logs_status ON command_logs (command_task_id, status);

-- transfer_tasks table
CREATE INDEX IF NOT EXISTS idx_transfer_tasks_flow_id ON transfer_tasks (flow_id);
CREATE INDEX IF NOT EXISTS idx_transfer_tasks_src_node_task_id ON transfer_tasks (src_node_task_id);

-- transfer_files table
CREATE INDEX IF NOT EXISTS idx_transfer_files_task_id ON transfer_files (transfer_task_id);

-- transfer_logs table
CREATE INDEX IF NOT EXISTS idx_transfer_logs_flow_id ON transfer_logs (flow_id);
CREATE INDEX IF NOT EXISTS idx_transfer_logs_task_id ON transfer_logs (transfer_task_id);
CREATE INDEX IF NOT EXISTS idx_transfer_logs_file_id ON transfer_logs (transfer_file_id);
-- transfer_logs status lookups
CREATE INDEX IF NOT EXISTS idx_transfer_logs_status ON transfer_logs (transfer_file_id, status);

-- flow_templates table
CREATE INDEX IF NOT EXISTS idx_flow_templates_name ON flow_templates (name);
CREATE INDEX IF NOT EXISTS idx_flow_templates_key_words ON flow_templates (key_words);
|
||||
|
||||
|
||||
|
||||
|
||||
115
mod/project/node/executor/__init__.py
Normal file
115
mod/project/node/executor/__init__.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import json
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, ExecutorDB, ExecutorLog
|
||||
|
||||
|
||||
class Task(object):
    """Runs one executor task: fans an SSH script out to every node that has a
    pending ExecutorLog entry, streaming output into per-node log files and
    persisting finished logs through a single writer thread.
    """

    def __init__(self, task_id: int, log_id: int):
        self._edb = ExecutorDB()
        self.task = self._edb.ExecutorTask.find("id = ?", (task_id,))
        if not self.task:
            raise RuntimeError("The specified task does not exist")
        if log_id == 0:
            # run every log entry belonging to this task
            self.task.elogs = self._edb.ExecutorLog.query("executor_task_id = ?", (self.task.id,))
        else:
            # run one explicitly selected log entry
            self.task.elogs = [self._edb.ExecutorLog.find("executor_task_id = ? AND id = ?", (self.task.id, log_id))]
        if not self.task.elogs:
            raise RuntimeError("Task has no execution entry")

        # worker threads push finished logs here; end_func persists them
        self.end_queue = queue.Queue()
        self.end_status = False

    def end_func(self):
        """Writer thread: drain end_queue into the DB until all workers finish."""
        self._edb = ExecutorDB()
        while not self.end_queue.empty() or not self.end_status:
            if self.end_queue.empty():
                time.sleep(0.1)
                # FIX: previously fell through to a blocking get() with an empty
                # queue, which could hang forever once all workers had finished
                continue

            elog: ExecutorLog = self.end_queue.get()
            self._edb.ExecutorLog.update(elog)

    def start(self):
        """Launch one worker thread per log entry and wait for completion."""
        thread_list = []
        s_db = ServerNodeDB()
        for log in self.task.elogs:
            node = s_db.get_node_by_id(log.server_id)
            if not node:
                log.status = 2
                log.update_log("Node data loss, unable to execute\n")
                self._edb.ExecutorLog.update(log)
                # FIX: previously fell through and crashed on node["ssh_conf"]
                continue

            ssh_conf = json.loads(node["ssh_conf"])
            if not ssh_conf:
                log.status = 2
                log.update_log("Node SSH configuration data lost, unable to execute\n")
                self._edb.ExecutorLog.update(log)
                # FIX: previously still launched a worker with an empty conf
                continue

            thread = threading.Thread(target=self.run_one, args=(ssh_conf, log))
            thread.start()
            thread_list.append(thread)

        self._edb.close()
        end_th = threading.Thread(target=self.end_func)
        end_th.start()

        for i in thread_list:
            i.join()
        # only set after all workers joined, so no more queue puts can happen
        self.end_status = True
        end_th.join()

    def run_one(self, ssh_conf: dict, elog: ExecutorLog):
        """Worker: run the task's script on one node over SSH, streaming output
        into elog, then hand the finished log to the writer thread.
        """
        ssh = SSHExecutor(
            host=ssh_conf["host"],
            port=ssh_conf["port"],
            username=ssh_conf["username"],
            password=ssh_conf["password"],
            key_data=ssh_conf["pkey"],
            passphrase=ssh_conf["pkey_passwd"])
        elog.write_log("Start executing the task\nStart establishing SSH connection...\n")
        try:
            ssh.open()

            def on_stdout(data):
                if isinstance(data, bytes):
                    data = data.decode()
                print(data)  # NOTE(review): debug print kept to preserve behavior
                elog.write_log(data)

            elog.write_log("Start executing script...\n\n")
            t = time.time()
            res_code = ssh.execute_script_streaming(
                script_content=self.task.script_content,
                script_type=self.task.script_type,
                timeout=60 * 60,
                on_stdout=on_stdout,
                on_stderr=on_stdout
            )
            take_time = round((time.time() - t) * 1000, 2)
            elog.write_log("\n\nExecution completed, time-consuming [{}ms]\n".format(take_time))
            if res_code == 0:
                elog.status = 1
                elog.write_log("Mission accomplished\n", is_end_log=True)
            else:
                elog.status = 3
                elog.write_log("Task exception, return status code is:{}\n".format(res_code), is_end_log=True)
            self.end_queue.put(elog)
        except Exception as e:
            traceback.print_exc()
            elog.status = 2
            elog.write_log("\nTask failed, error:" + str(e), is_end_log=True)
            self.end_queue.put(elog)
            return
|
||||
|
||||
|
||||
# log_id 要执行的子任务,默认为 0,表示执行所有子任务
|
||||
def run_executor_task(task_id: int, log_id: int = 0):
    """Execute an executor task's sub-jobs.

    log_id 0 (the default) runs every sub-task; any other value runs only
    that single log entry.
    """
    Task(task_id, log_id).start()
|
||||
|
||||
|
||||
|
||||
913
mod/project/node/executorMod.py
Normal file
913
mod/project/node/executorMod.py
Normal file
@@ -0,0 +1,913 @@
|
||||
import json
|
||||
import os.path
|
||||
import threading
|
||||
import time
|
||||
import psutil
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Optional, Tuple, MutableMapping, Union
|
||||
|
||||
import simple_websocket
|
||||
from mod.base import json_response, list_args
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode, SSHApi
|
||||
from mod.project.node.dbutil import Script, CommandLog, TaskFlowsDB, CommandTask, ServerNodeDB, TransferTask, \
|
||||
ServerMonitorRepo, Flow, FlowTemplates
|
||||
from mod.project.node.task_flow import self_file_running_log, flow_running_log, flow_useful_version, file_task_run_sync, \
|
||||
command_task_run_sync
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class main:
|
||||
next_flow_tip_name = "user_next_flow_tip"
|
||||
|
||||
@staticmethod
def create_script(get):
    """Create a new script record; duplicate names are rejected.

    Fix: `public.return_message(-1, 0,=err)` was a syntax error
    (stray `=` before the argument).
    """
    e_db = TaskFlowsDB()
    err = Script.check(get)
    if err:
        return public.return_message(-1, 0, err)
    s = Script.from_dict(get)
    # reject duplicate script names
    if e_db.Script.find("name = ?", (s.name,)):
        return public.return_message(-1, 0, "Script name already exists")
    err = e_db.Script.create(s)
    if isinstance(err, str):
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, s.to_dict())
|
||||
|
||||
@staticmethod
def modify_script(get):
    """Validate and persist changes to an existing script."""
    e_db = TaskFlowsDB()
    err = Script.check(get)
    if err:
        return public.return_message(-1, 0, err)
    s = Script.from_dict(get)
    if not s.id:
        return public.return_message(-1, 0, "Script ID cannot be empty")
    if not e_db.Script.find("id = ?", (s.id,)):
        return public.return_message(-1, 0, "Script does not exist")
    err = e_db.Script.update(s)
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, s.to_dict())
|
||||
|
||||
@staticmethod
def delete_script(get):
    """Delete one script by the id carried in the request."""
    e_db = TaskFlowsDB()
    if not get.id:
        return public.return_message(-1, 0, "Script ID cannot be empty")
    try:
        del_id = int(get.id)
    except:
        return public.return_message(-1, 0, "Script ID format error")

    e_db.Script.delete(del_id)
    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def get_script_list(get):
    """Paged listing of scripts filtered by free-text search and script type.

    Request fields: p (page), limit, search, script_type (all|python|shell).
    Idiom fixes: `x not in (...)` instead of `not x in [...]`; the three
    identical LIKE parameters are added with one extend().
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    search = get.get('search', "").strip()
    script_type = get.get('script_type/s', "all")
    if script_type not in ("all", "python", "shell"):
        script_type = "all"

    where_list, params = [], []
    if search:
        # match against name, content and description alike
        where_list.append("(name like ? or content like ? or description like ?)")
        like = "%{}%".format(search)
        params.extend([like, like, like])

    if script_type != "all":
        where_list.append("script_type = ?")
        params.append(script_type)

    where = " and ".join(where_list)
    e_db = TaskFlowsDB()
    data_list = e_db.Script.query_page(where, tuple(params), page_num=page_num, limit=limit)
    count = e_db.Script.count(where, params)
    page = public.get_page(count, page_num, limit)
    page["data"] = [i.to_dict() for i in data_list]
    return page
|
||||
|
||||
@staticmethod
def bath_delete_script(get):
    """Batch-delete scripts by a list of ids.

    (The misspelled name — "bath" — is kept: it is the public API name.)
    """
    raw_ids = list_args(get, 'script_ids')
    try:
        script_ids = [int(v) for v in raw_ids]
    except:
        return public.return_message(-1, 0, "Script ID format error")
    if not script_ids:
        return public.return_message(-1, 0, "Script ID cannot be empty")

    err = TaskFlowsDB().Script.delete(script_ids)
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def create_task(get):
    """Create a command task that runs one script on several nodes over SSH.

    Accepts either an existing script_id or an ad-hoc script_content +
    script_type pair. One CommandLog per node is prepared, then the
    background executor process is spawned.

    Fixes: the two error paths concatenated a possibly-non-str DB return
    value onto a message (`"..." + command_task_id`), which raises
    TypeError exactly when the error should be reported — wrapped in
    str(). Also removed the unused `res` binding.
    """
    node_ids = list_args(get, 'node_ids')
    if not node_ids:
        return public.return_message(-1, 0, "Node ID cannot be empty")
    try:
        node_ids = [int(i) for i in node_ids]
    except:
        return public.return_message(-1, 0, "Node ID format error")

    e_db = TaskFlowsDB()
    script_id = get.get('script_id/d', 0)
    if script_id:
        s = e_db.Script.find("id = ?", (script_id,))
        if not s:
            return public.return_message(-1, 0, "Script does not exist")

    elif get.get("script_content/s", "").strip():
        if get.get("script_type", "").strip() not in ("python", "shell"):
            return public.return_message(-1, 0, "Script type error")
        # ad-hoc script: not stored in the scripts table, so its id stays 0
        s = Script("", get.get("script_type", "").strip(), content=get.get("script_content", "").strip())
        s.id = 0
    else:
        return public.return_message(-1, 0, "Please select a script")

    nodes_db = ServerNodeDB()
    nodes = []
    timestamp = int(datetime.now().timestamp())
    for i in node_ids:
        n = nodes_db.get_node_by_id(i)
        if not n:
            return public.return_message(-1, 0, "The node with node ID [{}] does not exist".format(i))
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        if not n["ssh_conf"]:
            return public.return_message(-1, 0, "The node with node ID [{}] has not configured SSH information and cannot distribute instructions".format(i))
        # unique-ish log file name: script hash + timestamp + node remark
        n["log_name"] = "{}_{}_{}.log".format(public.md5(s.content)[::2], timestamp, n['remarks'])
        nodes.append(n)

    e_task = CommandTask(
        script_id=s.id,
        script_content=s.content,
        script_type=s.script_type,
        flow_id=0,      # standalone task, not part of a flow
        step_index=0,
    )
    command_task_id = e_db.CommandTask.create(e_task)
    e_task.id = command_task_id
    if not isinstance(command_task_id, int) or command_task_id <= 0:
        # FIX: coerce — the failure value is not guaranteed to be a str
        return public.return_message(-1, 0, "Task creation failed:" + str(command_task_id))

    log_list = []
    for i in nodes:
        elog = CommandLog(
            command_task_id=command_task_id,
            server_id=i["id"],
            ssh_host=i["ssh_conf"]["host"],
            status=0,
            log_name=i["log_name"],
        )
        elog.create_log()
        log_list.append(elog)

    last_id = e_db.CommandLog.create(log_list)
    if not isinstance(last_id, int) or last_id <= 0:
        # roll back the on-disk log files created above
        for i in log_list:
            i.remove_log()
        # FIX: coerce — same reason as above
        return public.return_message(-1, 0, "Failed to create log:" + str(last_id))

    # detach actual execution into a background process
    script_py = "{}/script/node_command_executor.py command".format(public.get_panel_path())
    public.ExecShell("nohup {} {} {} > /dev/null 2>&1 &".format(
        public.get_python_bin(), script_py, command_task_id)
    )

    data_dict = e_task.to_dict()
    data_dict["log_list"] = [i.to_dict() for i in log_list]
    data_dict["task_id"] = command_task_id
    return public.return_message(0, 0, data_dict)
|
||||
|
||||
@staticmethod
def get_server_info(server_id: int, server_cache) -> dict:
    """Look up a node record, memoized in server_cache.

    Unknown ids are cached as {} so the DB is queried at most once per id.
    """
    cached = server_cache.get(server_id)
    if cached:
        return cached
    row = ServerNodeDB().get_node_by_id(server_id)
    server_cache[server_id] = row if row else {}
    return server_cache[server_id]
|
||||
|
||||
@classmethod
def get_task_list(cls, get):
    """Paged listing of command tasks with per-node log summaries.

    Request fields: p (page), limit, script_type (all|python|shell), search.
    Returns the standard return_message envelope around a page dict.
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    script_type = get.get('script_type/s', "all")
    if not script_type in ["all", "python", "shell"]:
        script_type = "all"
    search = get.get('search', "").strip()

    e_db = TaskFlowsDB()
    count, tasks = e_db.CommandTask.query_tasks(
        page=page_num, size=limit, script_type=script_type, search=search
    )

    res = []
    # cache node rows so each server is looked up at most once per request
    server_cache: Dict[int, Dict] = {}
    for i in tasks:
        task_dict = i.to_dict()
        log_list = e_db.CommandLog.query("command_task_id = ?", (i.id,))
        task_dict["log_list"] = []
        if i.script_id > 0:
            s = e_db.Script.find("id = ?", (i.script_id,))
            if s:
                task_dict["script_name"] = s.name
            # NOTE(review): when the script row was deleted, "script_name"
            # is left unset here — confirm the frontend tolerates that
        else:
            # ad-hoc tasks (script_id == 0) have no stored script name
            task_dict["script_name"] = "-"

        for j in log_list:
            tmp = j.to_dict()
            tmp["server_name"] = cls.get_server_info(j.server_id, server_cache).get("remarks")
            task_dict["log_list"].append(tmp)

        res.append(task_dict)

    page = public.get_page(count, page_num, limit)
    page["data"] = res
    return public.return_message(0, 0,page)
|
||||
|
||||
@classmethod
def get_task_info(cls, get):
    """Detail view for a single command task, including per-node logs."""
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    e_db = TaskFlowsDB()
    task = e_db.CommandTask.find("id = ?", (task_id,))
    if not task:
        return public.return_message(-1, 0, "Task does not exist")

    result = task.to_dict()
    result["log_list"] = []
    server_cache = {}
    for entry in e_db.CommandLog.query("command_task_id = ?", (task_id,)):
        row = entry.to_dict()
        if entry.status != 0:
            # only jobs that have started carry a log file worth reading
            row["log"] = entry.get_log()
        row["server_name"] = cls.get_server_info(entry.server_id, server_cache).get("remarks", "")
        result["log_list"].append(row)

    return public.return_message(0, 0, result)
|
||||
|
||||
@staticmethod
def delete_task(get):
    """Delete a command task: stop its runner process, purge logs, drop rows."""
    e_db = TaskFlowsDB()
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    if not e_db.CommandTask.find("id = ?", (task_id,)):
        return public.return_message(-1, 0, "Task does not exist")

    # kill the background executor if it is still running
    pid_file = "{}/logs/executor_log/{}.pid".format(public.get_panel_path(), task_id)
    if os.path.exists(pid_file):
        pid: str = public.readFile(pid_file)
        if pid and pid.isdigit():
            public.ExecShell("kill -9 {}".format(pid))
        os.remove(pid_file)

    for entry in e_db.CommandLog.query("command_task_id = ?", (task_id,)):
        entry.remove_log()
        e_db.CommandLog.delete(entry.id)

    e_db.CommandTask.delete(task_id)
    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def batch_delete_task(get):
    """Delete several command tasks at once (runner processes, logs, rows)."""
    task_ids: List[int] = list_args(get, "task_ids")
    if not task_ids:
        return public.return_message(-1, 0, "Please select the task to delete")
    task_ids = [int(i) for i in task_ids]
    e_db = TaskFlowsDB()
    placeholders = ",".join(["?"] * len(task_ids))
    task_list = e_db.CommandTask.query("id IN ({})".format(placeholders), (*task_ids,))
    if not task_list:
        return public.return_message(-1, 0, "Task does not exist")
    for task in task_list:
        # stop a still-running background executor for this task
        pid_file = "{}/logs/executor_log/{}.pid".format(public.get_panel_path(), task.id)
        if os.path.exists(pid_file):
            pid: str = public.readFile(pid_file)
            if pid and pid.isdigit():
                public.ExecShell("kill -9 {}".format(pid))
            os.remove(pid_file)

        for entry in e_db.CommandLog.query("command_task_id = ?", (task.id,)):
            entry.remove_log()
            e_db.CommandLog.delete(entry.id)
        e_db.CommandTask.delete(task.id)

    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def retry_task(get):
    """Re-run one log entry (one node) of an existing command task."""
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    log_id = get.get('log_id/d', 0)
    if not log_id:
        return public.return_message(-1, 0, "The log ID cannot be empty")

    e_db = TaskFlowsDB()
    log = e_db.CommandLog.find("id = ? AND command_task_id = ?", (log_id, task_id))
    if not log:
        return public.return_message(-1, 0, "log does not exist")

    # reset the on-disk log and the status before relaunching
    log.create_log()
    log.status = 0
    e_db.CommandLog.update(log)
    script_py = "{}/script/node_command_executor.py command".format(public.get_panel_path())
    public.ExecShell("nohup {} {} {} {} > /dev/null 2>&1 &".format(
        public.get_python_bin(), script_py, task_id, log_id)
    )
    return public.return_message(0, 0, "Retry started")
|
||||
|
||||
@staticmethod
def node_create_transfer_task(get):
    """Endpoint hit by a master node: register a transfer task locally,
    with this machine acting as the data-source node."""
    try:
        transfer_task_data = json.loads(get.get('transfer_task_data', "{}"))
        if not transfer_task_data:
            return public.return_message(-1, 0, "Parameter error")
    except Exception:
        return public.return_message(-1, 0, "Parameter error")

    # force source-node semantics regardless of what the master sent
    transfer_task_data["flow_id"] = 0
    transfer_task_data["step_index"] = 0
    transfer_task_data["src_node"] = {"name": "local"}
    transfer_task_data["src_node_task_id"] = 0
    if not isinstance(transfer_task_data["dst_nodes"], dict):
        return public.return_message(-1, 0, "Please upgrade the version of the main node panel you are currently using")

    fdb = TaskFlowsDB()
    task_id = fdb.TransferTask.create(TransferTask.from_dict(transfer_task_data))
    if not task_id:
        return public.return_message(-1, 0, "Task creation failed")
    return public.return_message(0, 0, {"task_id": task_id})
|
||||
|
||||
@classmethod
def node_transferfile_status_history(cls, get):
    """Return historic per-file transfer status for one task.

    only_error=1 (the default) restricts the result to failed entries.
    """
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")
    only_error = get.get('only_error/d', 1)
    fdb = TaskFlowsDB()
    ret = fdb.history_transferfile_task(task_id, only_error=(only_error == 1))
    fdb.close()
    return public.return_message(0, 0, ret)
|
||||
|
||||
@classmethod
def node_proxy_transferfile_status(cls, get):
    """Stream the live/stored status of a transfer task over a WebSocket.

    Depending on the task state this either replays stored history, re-runs
    a single log entry (the_log_id), or starts/attaches to the background
    worker and tails its status until it finishes. Every exit path sends a
    final "{}" frame to tell the receiving end the stream is over.
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0, "Please use WebSocket connection")

    task_id = get.get('task_id/d', 0)
    exclude_nodes = list_args(get, "exclude_nodes")
    the_log_id = get.get('the_log_id/d', 0)
    if not task_id:
        ws.send(json.dumps({"type": "end", "msg": "Task ID cannot be empty"}))
        ws.send("{}")
        return

    # Bad ids in exclude_nodes are tolerated: fall back to excluding nothing.
    try:
        exclude_nodes = [int(i) for i in exclude_nodes]
    except:
        exclude_nodes = []

    fdb = TaskFlowsDB()
    task = fdb.TransferTask.get_byid(task_id)
    if not task:
        ws.send(json.dumps({"type": "end", "msg": "Task does not exist"}))
        ws.send("{}")
        return
    if the_log_id:  # single-task retry of one log entry
        res_data = file_task_run_sync(task_id, the_log_id)
        if isinstance(res_data, str):
            # a string result is an error message
            ws.send(json.dumps({"type": "error", "msg": res_data}))
            ws.send("{}")
        else:
            ws.send(json.dumps({"type": "end", "data": res_data}))
            ws.send("{}")
        fdb.close()
        return

    if task.status in (0, 3):  # first run, or retrying after an error
        pid = cls._start_task("file", task_id, exclude_nodes=exclude_nodes)
    elif task.status == 2:  # already succeeded: fetch the history and return it
        ret = fdb.history_transferfile_task(task_id)
        ws.send(json.dumps({"type": "end", "data": ret}))
        ws.send("{}")
        fdb.close()
        return
    else:  # still running: look up the worker's pid file
        pid_file = "{}/logs/executor_log/file_{}_0.pid".format(public.get_panel_path(), task_id)
        if os.path.exists(pid_file):
            pid = int(public.readFile(pid_file))
        else:
            pid = None

    if not pid:  # worker failed or is gone: return what the database has
        ret = fdb.history_transferfile_task(task_id)
        ws.send(json.dumps({"type": "end", "data": ret}))
        fdb.close()
        ws.send("{}")
        return

    def send_status(soc_data: dict):
        # relay one status frame from the worker to the client
        ws.send(json.dumps({"type": "status", "data": soc_data}))

    err = self_file_running_log(task_id, send_status)
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))

    ret = fdb.history_transferfile_task(task_id)
    ws.send(json.dumps({"type": "end", "data": ret}))
    fdb.close()
    ws.send("{}")  # tell the receiving end the data stream has ended
    return
||||
def run_flow_task(self, get):
    """Validate, create and run a multi-node task flow over a WebSocket.

    Parses node ids and the flow description, verifies each participating
    node (SSH enabled for command steps, panel type and version for data
    sources), persists the flow, launches the background executor and then
    streams status frames until the flow finishes.
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0,"Please use WebSocket connection")

    public.set_module_logs("nodes_flow_task", "run_flow_task")
    node_ids = list_args(get, 'node_ids')
    if not node_ids:
        ws.send(json.dumps({"type": "error", "msg": "Node ID cannot be empty"}))
        return
    try:
        node_ids = [int(i) for i in node_ids]
    except:
        ws.send(json.dumps({"type": "error", "msg": "Node ID format error"}))
        return

    # flow_data may arrive as a JSON string or an already-decoded list/tuple.
    try:
        flow_data = get.get('flow_data', '[]')
        if isinstance(flow_data, str):
            flow_data = json.loads(flow_data)
        elif isinstance(flow_data, (list, tuple)):
            pass
        else:
            raise
    except:
        ws.send(json.dumps({"type": "error", "msg": "Process data format error"}))
        return

    strategy = {"run_when_error": True}
    if "exclude_when_error" in get and get.exclude_when_error not in ("1", "true", 1, True):
        strategy["exclude_when_error"] = False

    # Scan the steps once: command steps require SSH on target nodes, and
    # file steps name extra source nodes that must also be validated.
    has_cmd_task = False
    data_src_node = []
    for i in flow_data:
        if i["task_type"] == "command":
            has_cmd_task = True
        elif i["task_type"] == "file":
            data_src_node.append(i["src_node_id"])

    nodes_db = ServerNodeDB()
    used_nodes, target_nodes = [], []
    srv_cache = ServerMonitorRepo()
    for i in set(node_ids + data_src_node):
        n = nodes_db.get_node_by_id(i)
        if not n:
            ws.send(json.dumps({"type": "error", "msg": "The node with node ID [{}] does not exist".format(i)}))
            return
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        if has_cmd_task and n["id"] in node_ids and not n["ssh_conf"]:
            ws.send(json.dumps({"type": "error", "msg": "The node of node [{}] has not enabled SSH".format(n["remarks"])}))
            return
        if n["id"] in data_src_node:
            is_local = n["app_key"] == n["api_key"] == "local"
            if (not n["app_key"] and not n["api_key"]) or n["lpver"]:  # 1panel panels, or SSH-only nodes, cannot act as a data source
                ws.send(json.dumps(
                    {"type": "error", "msg": "Node [{}] is not a pagoda node and cannot be used as a data source".format(n["remarks"])}))
                return
            if not is_local:
                # check the node's version number
                tmp = srv_cache.get_server_status(n["id"])
                if not tmp or not flow_useful_version(tmp["version"]):
                    ws.send(
                        json.dumps({"type": "error", "msg": "Node [{}] version is too low, please upgrade the node".format(n["remarks"])}))
                    return

        used_nodes.append(n)
        if n["id"] in node_ids:
            target_nodes.append(n)

    fdb = TaskFlowsDB()
    flow, err = fdb.create_flow(used_nodes, target_nodes, strategy, flow_data)
    if not flow:
        ws.send(json.dumps({"type": "error", "msg": err}))
        return
    fdb.close()

    pid = self._start_task("flow", flow.id)
    if not pid:
        ws.send(json.dumps({"type": "error", "msg": "Task startup failed"}))
        return

    def update_status(data: dict):
        # relay one status frame from the executor to the client
        ws.send(json.dumps({"type": "status", "data": data}))

    err = flow_running_log(flow.id, update_status)
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))
    # flow_data = fdb.history_flow_task(flow.id)
    # ws.send(json.dumps({"type": "data", "data": flow_data}))
    ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
    return
||||
@classmethod
def _start_task(cls, task_type: str, task_id: int, exclude_nodes: List[int]=None) -> Optional[int]:
    """Launch the background executor for a task and return its PID.

    Reuses a live worker when the pid file already points at one; otherwise
    spawns the executor script detached and polls (~3s max) for its pid
    file. Returns None when the worker never came up.
    """
    panel_path = public.get_panel_path()
    pid_file = "{}/logs/executor_log/{}_{}_0.pid".format(panel_path, task_type, task_id)

    def _live_pid():
        # pid of an already-running worker, or None
        if not os.path.exists(pid_file):
            return None
        worker_pid = int(public.readFile(pid_file))
        return worker_pid if psutil.pid_exists(worker_pid) else None

    existing = _live_pid()
    if existing:
        return existing

    launch_parts = [
        public.get_python_bin(),
        "{}/script/node_command_executor.py".format(panel_path),
        "--task_type={}".format(task_type),
        "--task_id={}".format(task_id),
    ]
    raw_excludes = exclude_nodes or []
    if raw_excludes:
        skip_ids = [str(i) for i in raw_excludes if i]
        launch_parts.append("--exclude_nodes='{}'".format(",".join(skip_ids)))

    public.ExecShell("nohup {} > /dev/null 2>&1 &".format(" ".join(launch_parts)))

    # Poll 60 times at 50 ms intervals for the worker to write its pid file.
    for _ in range(60):
        started = _live_pid()
        if started:
            return started
        time.sleep(0.05)
    return None
||||
@classmethod
def flow_task_status(cls, get):
    """Stream the status of the most recent flow over the WebSocket.

    If the latest flow is still running: replay the current state of the
    flow and of every step (local DB for command/local-source steps, the
    source node's API for remote-source transfer steps), then tail the live
    log until completion. Otherwise return the stored history of the last
    flow, or a 'no_flow' frame if none exists.
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0, "Please use WebSocket connection")

    fdb = TaskFlowsDB()
    flow = fdb.Flow.last(order_by="id DESC")
    if flow and flow.status == "running":
        # fix: every other call site passes the flow id, not the Flow object
        flow_data = fdb.history_flow_task(flow.id)
        ws.send(json.dumps({"type": "status", "data": flow_data}))
        for t in flow.steps:
            t: Union[CommandTask, TransferTask]
            src_node = getattr(t, "src_node", {})
            is_local_src = src_node.get("address", None) is None
            if not src_node:
                # no source node recorded -> this is a command step
                task_data = fdb.history_command_task(t.id)
            elif is_local_src:
                task_data = fdb.history_transferfile_task(t.id)
            else:
                # remote source: ask the source node for its transfer history
                srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"], src_node["remarks"])
                srv_data = srv.node_transferfile_status_history(t.src_node_task_id)
                if srv_data["status"]:
                    task_data = srv_data["data"]
                else:
                    # remote call failed: send an empty-shaped placeholder
                    task_data = {
                        "task_id": t.id, "task_type": "file",
                        "count": 0, "complete": 0, "error": 0, "data": []
                    }
            ws.send(json.dumps({"type": "status", "data": task_data}))

        err = flow_running_log(flow.id, lambda x: ws.send(json.dumps({"type": "status", "data": x})))
        if err:
            ws.send(json.dumps({"type": "error", "msg": err}))

        ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
        return
    else:
        if not flow:
            ws.send(json.dumps({"type": "no_flow", "msg": "No tasks"}))  # nothing has ever been run
            return
        flow_data = fdb.history_flow_task(flow.id)
        ws.send(json.dumps({"type": "end", "last_flow": flow_data}))
        return
||||
@classmethod
def next_flow_tip(cls, get):
    """Acknowledge the 'next flow tip' request; nothing is persisted here."""
    ack = public.return_message(0, 0,"Setup successful")
    return ack
||||
@staticmethod
def get_flow_info(get):
    """Return the stored history for one flow, addressed by flow_id."""
    requested_id = get.get("flow_id/d", 0)
    flows_db = TaskFlowsDB()
    found = flows_db.Flow.get_byid(requested_id)
    if not found:
        return public.return_message(-1, 0,"Task does not exist")
    history = flows_db.history_flow_task(found.id)
    return public.return_message(0, 0, history)
|
||||
@staticmethod
def get_command_task_info(get):
    """Return the full (errors included) history of one command task."""
    wanted = get.get("task_id/d", 0)
    flows_db = TaskFlowsDB()
    record = flows_db.CommandTask.get_byid(wanted)
    if not record:
        return public.return_message(-1, 0,"Task does not exist")
    history = flows_db.history_command_task(record.id, only_error=False)
    return public.return_message(0, 0, history)
|
||||
@staticmethod
def get_transferfile_task_info(get):
    """Return the full history of one transfer task.

    Local-source tasks are read from our own DB; remote-source tasks are
    fetched from the source node's API, with an empty-shaped fallback when
    the remote call fails.
    """
    wanted = get.get("task_id/d", 0)
    flows_db = TaskFlowsDB()
    record = flows_db.TransferTask.get_byid(wanted)
    if not record:
        return public.return_message(-1, 0,"Task does not exist")

    origin = record.src_node
    if origin.get("address", None) is None:
        # no address recorded -> the source is this node
        local_history = flows_db.history_transferfile_task(record.id, only_error=False)
        return public.return_message(0, 0, local_history)

    remote = ServerNode(origin["address"], origin["api_key"], origin["app_key"], origin["name"])
    remote_res = remote.node_transferfile_status_history(record.src_node_task_id, only_error=False)
    if remote_res["status"]:
        payload = remote_res["data"]
    else:
        payload = {
            "task_id": record.id, "task_type": "file",
            "count": 0, "complete": 0, "error": 0, "data": []
        }
    return public.return_message(0, 0, payload)
|
||||
def flow_task_list(self, get):
    """Paginated list of flows with per-flow history and server summaries.

    Args (via get): p - page number (>=1), limit - page size (>=1).
    Returns a standard page payload whose 'data' holds one dict per flow.
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)

    fdb = TaskFlowsDB()
    flow_list = fdb.Flow.query_page(page_num=page_num, limit=limit)
    count = fdb.Flow.count()
    res = []
    server_cache: Dict[int, Dict] = {}  # node_id -> node info, shared across rows
    for flow in flow_list:
        tmp_data = fdb.history_flow_task(flow.id)
        server_list = []
        # server_ids is stored as "|id|id|...|"
        for raw_id in tmp_data["server_ids"].strip("|").split("|"):
            sid = int(raw_id)
            # fix: previously looked the same id up twice per entry
            info = self.get_server_info(sid, server_cache)
            server_list.append({
                "id": sid,
                "name": info.get("remarks", ""),
                "server_ip": info.get("server_ip", ""),
            })
        tmp_data["server_list"] = server_list
        res.append(tmp_data)

    page = public.get_page(count, page_num, limit)
    page["data"] = res
    return public.return_message(0, 0, page)
|
||||
@staticmethod
def remove_flow(get):
    """Delete finished flows plus their command/transfer tasks and logs.

    Flows still 'waiting' or 'running' are skipped. Command log files on
    disk are removed best-effort before the DB rows are purged.
    """
    flow_ids = list_args(get,"flow_ids")
    if not flow_ids:
        return public.return_message(-1, 0,"Please select the task to delete")
    fdb = TaskFlowsDB()
    flow_ph = ",".join(["?"] * len(flow_ids))
    flows = fdb.Flow.query(
        "id IN (%s) AND status NOT IN (?, ?)" % flow_ph,
        (*flow_ids, "waiting", "running")
    )

    command_tasks = fdb.CommandTask.query(
        "flow_id IN (%s)" % flow_ph,
        (*flow_ids,)
    )

    # fix: command logs reference the command task id, not the flow id,
    # so look them up via the task ids gathered above.
    task_ids = [task.id for task in command_tasks]
    if task_ids:
        command_logs = fdb.CommandLog.query(
            "command_task_id IN (%s)" % (",".join(["?"] * len(task_ids))),
            (*task_ids,)
        )
    else:
        command_logs = []

    for log in command_logs:
        try:
            if os.path.exists(log.log_file):
                os.remove(log.log_file)
        except Exception:
            pass  # best-effort: a locked/missing file must not abort deletion

    fdb.CommandLog.delete([log.id for log in command_logs])
    fdb.CommandTask.delete(task_ids)
    fdb.Flow.delete([flow.id for flow in flows])

    w, p = "flow_id IN (%s)" % flow_ph, (*flow_ids,)
    fdb.TransferTask.delete_where(w, p)
    fdb.TransferLog.delete_where(w, p)
    fdb.TransferFile.delete_where(w, p)

    return public.return_message(0, 0,"Deleted successfully")
|
||||
def retry_flow(self, get):
    """Re-run an unfinished flow, streaming its progress over the WebSocket."""
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0,"Please use WebSocket connection")

    wanted_id = get.get("flow_id/d", 0)
    target_flow = TaskFlowsDB().Flow.get_byid(wanted_id)
    if not target_flow:
        ws.send(json.dumps({"type": "error", "msg": "Task does not exist"}))
        return
    if target_flow.status == "complete":
        ws.send(json.dumps({"type": "error", "msg": "Task completed, cannot retry"}))
        return

    if not self._start_task("flow", target_flow.id):
        ws.send(json.dumps({"type": "error", "msg": "Task startup failed"}))
        return

    def relay_status(data):
        # forward one status frame from the executor to the client
        ws.send(json.dumps({"type": "status", "data": data}))

    tail_err = flow_running_log(target_flow.id, relay_status)
    if tail_err:
        ws.send(json.dumps({"type": "error", "msg": tail_err}))

    ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
    return
|
||||
# Retry one single sub-task, e.g. a single machine's file upload or command run.
@staticmethod
def retry_flow_task(get):
    """Synchronously re-run one log entry of a file or command task."""
    kind = get.get("task_type/s", "")
    task_id = get.get("task_id/d", 0)
    log_id = get.get("log_id/d", 0)
    if not (kind and task_id and log_id):
        return public.return_message(-1, 0,"Parameter error")
    if kind not in ("file", "command"):
        return public.return_message(-1, 0,"Parameter error")
    runner = file_task_run_sync if kind == "file" else command_task_run_sync
    outcome = runner(task_id, log_id)
    # a string result is an error message
    if isinstance(outcome, str):
        return public.return_message(-1, 0, outcome)
    return public.return_message(0, 0, outcome)
|
||||
|
||||
@staticmethod
def stop_flow(get):
    """Kill a running flow's worker process and clear its pid/socket files."""
    flow_id = get.get("flow_id/d", 0)
    if not flow_id:
        return public.return_message(-1, 0,"Please select the task to stop")

    pid_path = "{}/logs/executor_log/flow_{}_0.pid".format(public.get_panel_path(), flow_id)
    if os.path.exists(pid_path):
        worker = int(public.readFile(pid_path))
        if psutil.pid_exists(worker):
            psutil.Process(worker).kill()

    if os.path.exists(pid_path):
        os.remove(pid_path)

    # Also remove the unix socket the worker used for status updates, if any.
    control_sock = "/tmp/flow_task/flow_task_{}".format(flow_id)
    if os.path.exists(control_sock):
        os.remove(control_sock)

    return public.return_message(0, 0,"Task stopped")
|
||||
@staticmethod
def file_dstpath_check(get):
    """Check, in parallel, whether 'path' is a valid upload target on each node.

    Returns one entry per node id: {"id", "remarks", "err"} where "err" is
    empty when the destination directory is usable.
    """
    path = get.get("path/s", "")
    node_ids = list_args(get, "node_ids")
    if not path or not node_ids:
        return public.return_message(-1, 0,"Parameter error")

    if path == "/":
        return public.return_message(-1, 0,"Cannot upload to root directory")

    nodes_db = ServerNodeDB()
    ret = []

    def check_node(n_data:dict, t_srv: Union[ServerNode, LPanelNode, SSHApi]):
        # Runs in a worker thread; list.append is safe under CPython's GIL.
        res = {"id": n_data["id"], "err": "", "remarks": n_data["remarks"]}
        err = t_srv.upload_dir_check(path)
        if err:
            res["err"] = err
        ret.append(res)

    th_list = []
    for i in node_ids:
        n = nodes_db.get_node_by_id(i)
        if not n:
            ret.append({"id": i, "err": "Node does not exist"})
            continue  # fix: previously fell through and crashed on n["ssh_conf"]
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        if n["app_key"] or n["api_key"]:
            srv = ServerNode.new_by_data(n)
        elif n["ssh_conf"]:
            srv = SSHApi(**n["ssh_conf"])
        else:
            ret.append({"id": i, "err": "Node configuration error"})
            continue

        th = threading.Thread(target=check_node, args=(n, srv))
        th.start()
        th_list.append(th)

    for th in th_list:
        th.join()

    return public.return_message(0, 0,ret)
|
||||
@staticmethod
def create_flow_template(get):
    """Validate and persist a new flow template; names must be unique."""
    problem = FlowTemplates.check(get)
    if problem:
        return public.return_message(-1, 0, problem)
    template = FlowTemplates.from_dict(get)

    store = TaskFlowsDB()
    # reject duplicate template names
    if store.FlowTemplate.find("name = ?", (template.name,)):
        return public.return_message(-1, 0,"Script name already exists")
    create_err = store.FlowTemplate.create(template)
    if isinstance(create_err, str):
        return public.return_message(-1, 0, create_err)
    store.close()
    return public.return_message(0, 0, template.to_dict())
|
||||
|
||||
@staticmethod
def modify_flow_template(get):
    """Validate and update an existing flow template, addressed by id."""
    problem = FlowTemplates.check(get)
    if problem:
        return public.return_message(-1, 0, problem)
    store = TaskFlowsDB()
    template = FlowTemplates.from_dict(get)
    if not template.id:
        return public.return_message(-1, 0,"Please select the template to modify")
    if not store.FlowTemplate.get_byid(template.id):
        return public.return_message(-1, 0,"Template does not exist")
    update_err = store.FlowTemplate.update(template)
    if isinstance(update_err, str) and update_err:
        return public.return_message(-1, 0, update_err)
    store.close()
    return public.return_message(0, 0, template.to_dict())
|
||||
@staticmethod
def delete_flow_template(get):
    """Delete one flow template by id."""
    # fix: use the already-parsed 'id/d' value instead of re-reading the
    # raw get.id a second time (the two reads could disagree).
    del_id = get.get("id/d", 0)
    if not del_id:
        return public.return_message(-1, 0,"Script ID cannot be empty")
    try:
        del_id = int(del_id)
    except (TypeError, ValueError):
        return public.return_message(-1, 0,"Script ID format error")

    e_db = TaskFlowsDB()
    e_db.FlowTemplate.delete(del_id)
    return public.return_message(0, 0,"Deleted successfully")
|
||||
@staticmethod
def get_flow_template_list(get):
    """Paginated template listing; 'search' matches name/key_words/description."""
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    search = get.get('search', "").strip()

    where_list, params = [], []
    if search:
        where_list.append("(name like ? or key_words like ? or description like ?)")
        like_term = "%{}%".format(search)
        params.extend([like_term, like_term, like_term])

    where = " and ".join(where_list)
    e_db = TaskFlowsDB()
    data_list = e_db.FlowTemplate.query_page(where, (*params,), page_num=page_num, limit=limit)
    count = e_db.FlowTemplate.count(where, params)
    page = public.get_page(count, page_num, limit)
    page["data"] = [item.to_dict() for item in data_list]
    return public.return_message(0, 0, page)
|
||||
|
||||
659
mod/project/node/file_transferMod.py
Normal file
659
mod/project/node/file_transferMod.py
Normal file
@@ -0,0 +1,659 @@
|
||||
import json
|
||||
import os.path
|
||||
import traceback
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
import simple_websocket
|
||||
from mod.base import json_response
|
||||
from mod.project.node.dbutil import FileTransfer, FileTransferTask, FileTransferDB, ServerNodeDB
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode
|
||||
from mod.project.node.filetransfer import task_running_log, wait_running
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class main():
|
||||
log_dir = "{}/logs/node_file_transfers".format(public.get_panel_path())
|
||||
if not os.path.exists(log_dir):
|
||||
os.makedirs(log_dir)
|
||||
|
||||
@staticmethod
def file_upload(args):
    """Proxy a file upload to the node identified by node_id.

    node_id may arrive in args or, failing that, in the multipart form
    body of the current request.
    """
    node_id = args.get('node_id', -1)
    if node_id == -1:
        # not present in args: fall back to the request's form field
        from YakPanel import request
        node_id = request.form.get('node_id', 0)

    if not node_id:
        return public.return_message(-1, 0,"node_id is null")

    if isinstance(node_id, str):
        try:
            node_id = int(node_id)
        except Exception:
            return public.return_message(-1, 0,"node_id is null")

    target = ServerNode.new_by_id(node_id)
    if not target:
        return public.return_message(-1, 0,"node not exists")

    return target.upload_proxy()
|
||||
@staticmethod
def file_download(args):
    """Proxy a file download from the node identified by node_id.

    Requires 'node_id' and 'filename' arguments.
    """
    node_id = args.get('node_id', 0)
    if not node_id:
        return public.return_message(-1, 0,"node_id is null")

    filename = args.get('filename/s', "")
    if not filename:
        # fix: was 'jpublic.return_message' -> NameError at runtime
        return public.return_message(-1, 0,"The filename parameter cannot be empty")
    if isinstance(node_id, str):
        try:
            node_id = int(node_id)
        except Exception:
            return public.return_message(-1, 0,"node_id is null")

    node = ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0,"node not exists")

    return node.download_proxy(filename)
|
||||
@staticmethod
def dir_walk(get):
    """Walk a local directory; returns the raw entry list (not wrapped) on success."""
    walk_root = get.get('path/s', "")
    if not walk_root:
        return public.return_message(-1, 0,"The path parameter cannot be empty")

    entries, walk_err = LocalNode().dir_walk(walk_root)
    if walk_err:
        return public.return_message(-1, 0, walk_err)
    return entries
|
||||
@classmethod
def create_filetransfer_task(cls, get):
    """Create a file transfer task between two configured nodes.

    Validates the request, resolves source/target node rows, then either
    runs the task locally (when one side is this panel) or asks the panel
    that will actually perform the transfer to create it, recording a
    mirror row in the local DB.

    Returns a return_message payload; status 0 with the task dict on
    success.
    """
    ft_db = FileTransferDB()
    task_data, err = ft_db.get_last_task()
    if err:
        return public.return_message(-1, 0, err)
    # Only one transfer may run on this node at a time.
    if task_data and task_data["status"] not in ("complete", "failed"):
        return public.return_message(-1, 0,"There are ongoing tasks on the current node, please wait for them to complete before submitting")

    public.set_module_logs("nodes_create_filetransfer_9", "create_filetransfer")
    source_node_id = get.get('source_node_id/d', -1)
    target_node_id = get.get('target_node_id/d', -1)
    source_path_list = get.get('source_path_list/s', "")
    target_path = get.get('target_path/s', "")
    default_mode = get.get('default_mode/s', "cover")
    if default_mode not in ("cover", "ignore", "rename"):
        return public.return_message(-1, 0,"Default mode parameter error")
    if source_node_id == target_node_id:
        return public.return_message(-1, 0,"The source node and target node cannot be the same")
    if source_node_id == -1 or target_node_id == -1:
        return public.return_message(-1, 0,"The source or destination node cannot be empty")

    # fix: error messages below previously misspelled the parameter names
    # ('sourcew_path_ist', 'target_cath').
    try:
        source_path_list = json.loads(source_path_list)
    except Exception:
        return public.return_message(-1, 0,"Error in the parameter 'source_path_list'")
    keys = ("path", "size", "is_dir")
    for items in source_path_list:
        if not all(item in keys for item in items.keys()):
            return public.return_message(-1, 0,"Error in the parameter 'source_path_list'")
        if not (isinstance(items["path"], str) and isinstance(items["is_dir"], bool) and
                isinstance(items["size"], int)):
            return public.return_message(-1, 0,"Error in the parameter 'source_path_list'")

    if not target_path:
        return public.return_message(-1, 0,"The target_path parameter cannot be empty")
    node_db = ServerNodeDB()
    # node id 0 means "this panel"
    if source_node_id == 0:
        src_node = node_db.get_local_node()
    else:
        src_node = node_db.get_node_by_id(source_node_id)
    if not src_node:
        return public.return_message(-1, 0,"The source node does not exist")
    if target_node_id == 0:
        target_node = node_db.get_local_node()
    else:
        target_node = node_db.get_node_by_id(target_node_id)
    if not target_node:
        return public.return_message(-1, 0,"The target node does not exist")
    if src_node["id"] == target_node["id"]:
        return public.return_message(-1, 0,"The source node and target node cannot be the same")

    real_create_res: Optional[dict] = None  # result of a remote create, when the task does not run here
    if src_node["api_key"] == src_node["app_key"] == "local":
        # Source is this panel -> plain upload, executed locally.
        return cls._create_filetransfer_task(
            source_node={
                "name": "local",
            },
            target_node={
                "name": "{}({})".format(target_node["remarks"], target_node["server_ip"]),
                "address": target_node["address"],
                "api_key": target_node["api_key"],
                "app_key": target_node["app_key"],
                "node_id": target_node_id,
                "lpver": target_node["lpver"]
            },
            source_path_list=source_path_list,
            target_path=target_path,
            created_by="local",
            default_mode=default_mode,
        )
    elif target_node["api_key"] == target_node["app_key"] == "local":
        # Target is this panel -> plain download, executed locally.
        return cls._create_filetransfer_task(
            source_node={
                "name": "{}({})".format(src_node["remarks"], src_node["server_ip"]),
                "address": src_node["address"],
                "api_key": src_node["api_key"],
                "app_key": src_node["app_key"],
                "node_id": source_node_id,
                "lpver": src_node["lpver"]
            },
            target_node={
                "name": "local",
            },
            source_path_list=source_path_list,
            target_path=target_path,
            created_by="local",
            default_mode=default_mode,
        )
    elif src_node["lpver"]:
        if target_node["lpver"]:
            return public.return_message(-1, 0,"Cannot support file transfer between 1panel nodes")
        # When the source is a 1panel node, only the target can pull the files.
        # fix: the source label below previously described the target node.
        if target_node["api_key"] == target_node["app_key"] == "local":
            return cls._create_filetransfer_task(
                source_node={
                    "name": "{}".format(src_node["remarks"]) + ("({})".format(src_node["server_ip"]) if src_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": "",
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="local",
                default_mode=default_mode,
            )
        else:
            # Ask the target panel to create the download task on its side.
            srv = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "{}".format(src_node["remarks"]) + ("({})".format(src_node["server_ip"]) if src_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": "",
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode
            )
    else:  # both sides are YakPanel nodes
        srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"])
        if srv.filetransfer_version_check():
            # Source panel cannot run the transfer: have the target pull instead.
            srv = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
            res = srv.filetransfer_version_check()
            if res:
                return public.return_message(-1, 0,"{} Node check error:".format(target_node["remarks"]) + res)
            # fix: the source label below previously described the target node.
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "{}".format(src_node["remarks"]) + ("({})".format(src_node["server_ip"]) if src_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": src_node["app_key"],
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode,
            )
        else:
            # Source panel is capable: have it push to the target.
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "local",
                },
                target_node={
                    "name": "{}".format(target_node["remarks"]) + ("({})".format(target_node["server_ip"]) if target_node["server_ip"] else ""),
                    "address": target_node["address"],
                    "api_key": target_node["api_key"],
                    "app_key": target_node["app_key"],
                    "node_id": target_node_id,
                    "lpver": target_node["lpver"]
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode,
            )

    if not real_create_res["status"]:
        return public.return_message(-1, 0,real_create_res["msg"])

    # Record a mirror row locally pointing at the remote task.
    tt_task_id = real_create_res["data"]["task_id"]
    db = FileTransferDB()
    tt = FileTransferTask(
        source_node={"node_id": source_node_id},
        target_node={"node_id": target_node_id},
        source_path_list=source_path_list,
        target_path=target_path,
        task_action=real_create_res["data"]["task_action"],
        status="running",
        created_by="local",
        default_mode=default_mode,
        target_task_id=tt_task_id,
        is_source_node=node_db.is_local_node(source_node_id),
        is_target_node=node_db.is_local_node(target_node_id),
    )
    db.create_task(tt)
    # fix: the success path previously returned status -1
    return public.return_message(0, 0,tt.to_dict())
|
||||
@classmethod
def node_create_filetransfer_task(cls, get):
    """API endpoint: create a transfer task on this node for a remote caller.

    Only reachable through authenticated API requests; the node and path
    fields arrive as JSON-encoded strings.
    """
    from YakPanel import g
    if not g.api_request:
        return public.return_message(-1, 0,"Unable to activate")

    target_path = get.get("target_path/s", "")
    created_by = get.get("created_by/s", "")
    default_mode = get.get("default_mode/s", "")

    try:
        source_node = json.loads(get.get("source_node/s", ""))
        target_node = json.loads(get.get("target_node/s", ""))
        source_path_list = json.loads(get.get("source_path_list/s", ""))
    except Exception:
        return public.return_message(-1, 0,"Parameter error")
    if not (target_path and created_by and default_mode
            and source_node and target_node and source_path_list):
        return public.return_message(-1, 0,"Parameter loss")

    # Refuse while another transfer is still active on this node.
    ft_db = FileTransferDB()
    task_data, err = ft_db.get_last_task()
    if err:
        return public.return_message(-1, 0, err)
    if task_data and task_data["status"] not in ("complete", "failed"):
        return public.return_message(-1, 0,"There is an ongoing task on the node, please wait for it to complete before submitting")
    return cls._create_filetransfer_task(
        source_node=source_node,
        target_node=target_node,
        source_path_list=source_path_list,
        target_path=target_path,
        created_by=created_by,
        default_mode=default_mode
    )
|
||||
    # Actually create the task.
    # Possible situations:
    # 1. source_node is the current node and target_node is another node -> upload
    # 2. target_node is the current node -> download
    @classmethod
    def _create_filetransfer_task(cls, source_node: dict,
                                  target_node: dict,
                                  source_path_list: List[dict],
                                  target_path: str,
                                  created_by: str,
                                  default_mode: str = "cover") -> Dict:
        """Create a file-transfer task: verify connectivity to the remote side,
        expand directories into individual file rows, persist the task and its
        FileTransfer rows, then launch the background worker script.

        :param source_node: source node descriptor; name == "local" means this machine
        :param target_node: target node descriptor; name == "local" means this machine
        :param source_path_list: items with "path" and "is_dir"; "dst_file" is filled in here
        :param target_path: destination directory on the target node
        :param created_by: creator identifier stored on the task
        :param default_mode: collision mode for existing files (default "cover")
        :return: public.return_message dict (task dict on success)
        """
        if source_node["name"] == "local":
            task_action = "upload"
            # For uploads the local side is walked; only the remote target needs a probe.
            check_node = LocalNode()
            if target_node["lpver"]:
                t_node = LPanelNode(target_node["address"], target_node["api_key"], target_node["lpver"])
                err = t_node.test_conn()
            else:
                t_node = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
                err = t_node.test_conn()
            # public.print_log(target_node["address"], err)
            if err:
                return public.return_message(-1, 0,"{} Node cannot connect, error message: {}".format(target_node["name"], err))
        elif target_node["name"] == "local":
            task_action = "download"
            # For downloads the remote source is both probed and walked below.
            if source_node["lpver"]:
                check_node = LPanelNode(source_node["address"], source_node["api_key"], source_node["lpver"])
                err = check_node.test_conn()
            else:
                check_node = ServerNode(source_node["address"], source_node["api_key"], source_node["app_key"])
                err = check_node.test_conn()
            # public.print_log(source_node["address"], err)
            if err:
                return public.return_message(-1, 0,"{} Node cannot connect, error message: {}".format(source_node["name"], err))
        else:
            # Neither side is this machine; this method only handles local<->remote.
            return public.return_message(-1, 0,"Node information that cannot be processed")

        # Remote ServerNode sources must run a panel version that supports transfers.
        if check_node.__class__ is ServerNode:
            ver_check = check_node.filetransfer_version_check()
            if ver_check:
                return public.return_message(-1, 0,"{} Node check error::".format(source_node["name"]) + ver_check)

        target_path = target_path.rstrip("/")
        file_list = []
        for src_item in source_path_list:
            if src_item["is_dir"]:
                # Expand a directory into its contained files (recursive walk).
                f_list, err = check_node.dir_walk(src_item["path"])
                if err:
                    return public.return_message(-1, 0,err)
                if not f_list:
                    # Empty directory: keep it as a single row so it is created remotely.
                    src_item["dst_file"] = os.path.join(target_path, os.path.basename(src_item["path"]))
                    file_list.append(src_item)
                else:
                    for f_item in f_list:
                        # Re-root each walked path under target_path.
                        f_item["dst_file"] = f_item["path"].replace(os.path.dirname(src_item["path"]), target_path)
                        file_list.append(f_item)
            else:
                src_item["dst_file"] = os.path.join(target_path, os.path.basename(src_item["path"]))
                file_list.append(src_item)

        if len(file_list) > 1000:
            return public.return_message(-1, 0,"More than 1000 files, please compress before transferring")

        db = FileTransferDB()
        tt = FileTransferTask(
            source_node=source_node,
            target_node=target_node,
            source_path_list=source_path_list,
            target_path=target_path,
            task_action=task_action,
            status="pending",
            created_by=created_by,
            default_mode=default_mode,
            is_source_node=source_node["name"] == "local",
            is_target_node=target_node["name"] == "local",
        )
        err = db.create_task(tt)
        if err:
            return public.return_message(-1, 0,err)
        ft_list = []
        for f_item in file_list:
            ft = FileTransfer(
                task_id=tt.task_id,
                src_file=f_item["path"],
                dst_file=f_item["dst_file"],
                file_size=f_item["size"],
                is_dir=f_item.get("is_dir", 0),
                status="pending",
                progress=0,
            )
            ft_list.append(ft)
        if not ft_list:
            return public.return_message(-1, 0,"There are no files available for transfer")
        err = db.batch_create_file_transfers(ft_list)
        if err:
            # Roll back the task row so a failed batch insert leaves no orphan task.
            db.delete_task(tt.task_id)
            return public.return_message(-1, 0,err)

        # Launch the detached worker process and wait for its status socket to appear.
        py_bin = public.get_python_bin()
        log_file = "{}/task_{}.log".format(cls.log_dir, tt.task_id)
        start_task = "nohup {} {}/script/node_file_transfers.py {} > {} 2>&1 &".format(
            py_bin,
            public.get_panel_path(),
            tt.task_id,
            log_file,
        )
        res = public.ExecShell(start_task)
        wait_timeout = wait_running(tt.task_id, timeout=10.0)
        if wait_timeout:
            return public.return_message(-1, 0,wait_timeout)
        return public.return_message(0, 0,tt.to_dict())
|
||||
|
||||
@staticmethod
|
||||
def file_list(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
p = get.get("p/d", 1)
|
||||
row = get.get("showRow/d", 50)
|
||||
path = get.get("path/s", "")
|
||||
search = get.get("search/s", "")
|
||||
|
||||
if node_id == -1:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
|
||||
if not path:
|
||||
return public.return_message(-1, 0,"Path parameter error")
|
||||
|
||||
data, err = node.file_list(path, p, row, search)
|
||||
if err:
|
||||
return public.return_message(-1, 0,err)
|
||||
if "status" not in data and "message" not in data:return public.return_message(0, 0,data)
|
||||
return data
|
||||
|
||||
@staticmethod
|
||||
def delete_file(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
if node_id == -1:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
|
||||
path = get.get("path/s", "")
|
||||
is_dir = get.get("is_dir/d", 0)
|
||||
if not path:
|
||||
return public.return_message(-1, 0,"Path parameter error")
|
||||
res=node.remove_file(path, is_dir=is_dir == 1)
|
||||
if res.get('status',-1)==0: return public.return_message(0, 0,res.get('message',{}).get('result',"File/Directory deleted successfully or moved to recycle bin!"))
|
||||
return public.return_message(-1, 0,res.get('msg',"File delete failed"))
|
||||
# return node.remove_file(path, is_dir=is_dir == 1)
|
||||
|
||||
def transfer_status(self, get):
|
||||
ws: simple_websocket.Server = getattr(get, '_ws', None)
|
||||
if not ws:
|
||||
return jpublic.return_message(-1, 0,"Please use WebSocket connection")
|
||||
|
||||
ft_db = FileTransferDB()
|
||||
task_data, err = ft_db.get_last_task()
|
||||
if err:
|
||||
ws.send(json.dumps({"type": "error", "msg": err}))
|
||||
return
|
||||
if not task_data:
|
||||
ws.send(json.dumps({"type": "end", "msg": "No tasks"}))
|
||||
return
|
||||
task = FileTransferTask.from_dict(task_data)
|
||||
if task.target_task_id:
|
||||
if task.task_action == "upload":
|
||||
run_node_id = task.source_node["node_id"]
|
||||
else:
|
||||
run_node_id = task.target_node["node_id"]
|
||||
run_node = ServerNode.new_by_id(run_node_id)
|
||||
res = run_node.get_transfer_status(task.target_task_id)
|
||||
if not res["status"]:
|
||||
ws.send(json.dumps({"type": "error", "msg": res["msg"]}))
|
||||
return
|
||||
if res["data"]["task"]["status"] in ("complete", "failed"):
|
||||
task.status = res["data"]["task"]["status"]
|
||||
task.completed_at = res["data"]["task"]["completed_at"]
|
||||
ft_db.update_task(task)
|
||||
res_data = res["data"]
|
||||
res_data["type"] = "end"
|
||||
res_data["msg"] = "Mission completed"
|
||||
ws.send(json.dumps(res_data))
|
||||
return
|
||||
|
||||
run_node.proxy_transfer_status(task.target_task_id, ws)
|
||||
else:
|
||||
if task.status in ("complete", "failed"):
|
||||
data, _ = ft_db.last_task_all_status()
|
||||
data.update({"type": "end", "msg": "Mission completed"})
|
||||
ws.send(json.dumps(data))
|
||||
return
|
||||
self._proxy_transfer_status(task, ws)
|
||||
|
||||
def node_proxy_transfer_status(self, get):
|
||||
ws: simple_websocket.Server = getattr(get, '_ws', None)
|
||||
if not ws:
|
||||
return public.return_message(-1, 0, "Please use WebSocket connection")
|
||||
task_id = get.get("task_id/d", 0)
|
||||
if not task_id:
|
||||
ws.send(json.dumps({"type": "error", "msg": "Task ID parameter error"}))
|
||||
return
|
||||
ft_db = FileTransferDB()
|
||||
task_data, err = ft_db.get_task(task_id)
|
||||
if err:
|
||||
ws.send(json.dumps({"type": "error", "msg": err}))
|
||||
return
|
||||
|
||||
task = FileTransferTask.from_dict(task_data)
|
||||
if task.status in ("complete", "failed"):
|
||||
data, _ = ft_db.last_task_all_status()
|
||||
data["type"] = "end"
|
||||
data["msg"] = "Mission completed"
|
||||
ws.send(json.dumps(data))
|
||||
return
|
||||
self._proxy_transfer_status(task, ws)
|
||||
|
||||
@staticmethod
|
||||
def _proxy_transfer_status(task: FileTransferTask, ws: simple_websocket.Server):
|
||||
def call_log(data):
|
||||
if isinstance(data, str):
|
||||
ws.send(json.dumps({"type": "end", "msg": data}))
|
||||
else:
|
||||
if data["task"]["status"] in ("complete", "failed"):
|
||||
data["msg"] = "Mission completed"
|
||||
data["type"] = "end"
|
||||
else:
|
||||
data["type"] = "status"
|
||||
data["msg"] = "Task in progress"
|
||||
ws.send(json.dumps(data))
|
||||
|
||||
task_running_log(task.task_id, call_log)
|
||||
|
||||
@staticmethod
|
||||
def get_transfer_status(get):
|
||||
task_id = get.get("task_id/d", 0)
|
||||
if not task_id:
|
||||
return public.return_message(-1, 0, "Task ID parameter error")
|
||||
|
||||
ft_db = FileTransferDB()
|
||||
task_data, err = ft_db.get_task(task_id)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
task = FileTransferTask.from_dict(task_data)
|
||||
file_list = ft_db.get_task_file_transfers(task_id)
|
||||
return public.return_message(0, 0, {
|
||||
"task": task.to_dict(),
|
||||
"file_list": file_list,
|
||||
})
|
||||
|
||||
@staticmethod
|
||||
def upload_check(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
if node_id == -1:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
filename = get.get("files/s", "")
|
||||
if "\n" in filename:
|
||||
f_list = filename.split("\n")
|
||||
else:
|
||||
f_list = [filename]
|
||||
res, err = node.upload_check(f_list)
|
||||
if err:
|
||||
return public.return_message(-1, 0,err)
|
||||
if 'message' not in res and 'status' not in res:return public.return_message(0, 0,res)
|
||||
return res
|
||||
|
||||
@staticmethod
|
||||
def dir_size(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
if node_id < 0:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
path = get.get("path/s", "")
|
||||
size, err = node.dir_size(path)
|
||||
if err:
|
||||
return public.return_message(-1, 0,err)
|
||||
return public.return_message(0, 0, {
|
||||
"size": public.to_size(size),
|
||||
"size_b": size,
|
||||
})
|
||||
|
||||
@staticmethod
|
||||
def create_dir(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
if node_id < 0:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
|
||||
path = get.get("path/s", "")
|
||||
res, err = node.create_dir(path)
|
||||
if err:
|
||||
return public.return_message(-1, 0,err)
|
||||
# if res["status"]:
|
||||
# return public.return_message(0, 0,res["msg"])
|
||||
return public.return_message(0, 0,"Successfully created directory")
|
||||
# return res
|
||||
|
||||
    @staticmethod
    def delete_dir(get):
        """Delete a directory on a node.

        NOTE(review): `res` is referenced below but never assigned -- the node
        lookup, path validation and the actual delete call appear to have been
        lost, so this method raises NameError as written. Compare with
        create_dir() above; confirm and restore the missing logic.
        """
        node_id = get.get("node_id/d", -1)
        if node_id < 0:
            return public.return_message(-1, 0,"Node parameter error")
        if res["status"]:
            return public.return_message(0, 0,res["msg"])
        return public.return_message(-1, 0,res["msg"])
|
||||
|
||||
@staticmethod
|
||||
def node_get_dir(get):
|
||||
node_id = get.get("node_id/d", -1)
|
||||
if node_id < 0:
|
||||
return public.return_message(-1, 0,"Node parameter error")
|
||||
if node_id == 0:
|
||||
node = LocalNode()
|
||||
else:
|
||||
node = ServerNode.new_by_id(node_id)
|
||||
|
||||
if not node:
|
||||
return public.return_message(-1, 0,"Node does not exist")
|
||||
|
||||
search = get.get("search/s", "")
|
||||
disk = get.get("disk/s", "")
|
||||
path = get.get("path/s", "")
|
||||
return node.get_dir(path, search, disk)
|
||||
198
mod/project/node/filetransfer/__init__.py
Normal file
198
mod/project/node/filetransfer/__init__.py
Normal file
@@ -0,0 +1,198 @@
|
||||
import os
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime
|
||||
|
||||
from .socket_server import StatusServer, StatusClient, register_cleanup
|
||||
from mod.project.node.dbutil import FileTransferDB, FileTransfer, FileTransferTask
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode
|
||||
from typing import Optional, Callable, Union
|
||||
|
||||
|
||||
class Filetransfer:
    """Executes one file-transfer task (upload to or download from a remote
    node) and publishes live progress over a Unix-domain status socket."""

    # Directory holding one status socket per running task.
    SOCKET_FILE_DIR = "/tmp/filetransfer"
    if not os.path.exists(SOCKET_FILE_DIR):
        os.mkdir(SOCKET_FILE_DIR)

    def __init__(self, task_id: int):
        """Load the task and its file rows, initialize progress counters, the
        status server and the source/destination node handles.

        :param task_id: id of the FileTransferTask to execute
        :raises ValueError: if the task cannot be loaded or has no files
        """
        self.ft_db = FileTransferDB()
        task_data, err = self.ft_db.get_task(task_id)
        # BUG FIX: originally `if err is None: raise ValueError(err)` -- the
        # condition was inverted, raising on success and ignoring real errors.
        if err:
            raise ValueError(err)

        self.task = FileTransferTask.from_dict(task_data)

        file_list = self.ft_db.get_task_file_transfers(task_id)
        if not file_list:
            raise ValueError("task_id:{} file_list is empty".format(task_id))

        self.file_map = {file_data["transfer_id"]: FileTransfer.from_dict(file_data) for file_data in file_list}
        self.file_count = len(self.file_map)
        self.file_complete = sum([1 for file in self.file_map.values() if file.status == "complete"])
        # BUG FIX: failed transfers are stored with status "failed" (see run()),
        # not "error"; the original counted "error" and therefore always resumed
        # with a zero error count.
        self.file_error = sum([1 for file in self.file_map.values() if file.status == "failed"])
        self.count_size = sum([file.file_size for file in self.file_map.values()])
        self.complete_size = sum([file.file_size for file in self.file_map.values() if file.status == "complete"])
        self.current_file_size = 0  # bytes already transferred of the file currently in flight
        self._srv = StatusServer(self.get_task_status, self.SOCKET_FILE_DIR + "/task_" + str(task_id))

        if self.task.task_action == "upload":
            # Upload: local machine is the source, the remote node the destination.
            self.sn = LocalNode()
            if self.task.target_node["lpver"]:
                self.dn = LPanelNode(self.task.target_node["address"], self.task.target_node["api_key"],
                                     self.task.target_node["lpver"])
            else:
                self.dn = ServerNode(self.task.target_node["address"], self.task.target_node["api_key"],
                                     self.task.target_node["app_key"])
        else:
            # Download: remote node is the source, the local machine the destination.
            if self.task.source_node["lpver"]:
                self.sn = LPanelNode(self.task.source_node["address"], self.task.source_node["api_key"],
                                     self.task.source_node["lpver"])
            else:
                self.sn = ServerNode(self.task.source_node["address"], self.task.source_node["api_key"],
                                     self.task.source_node["app_key"])
            self.dn = LocalNode()

        self._close_func: Optional[Callable] = None

    def get_task_status(self, init: bool = False) -> dict:
        """Snapshot of overall task progress plus per-file status.

        Fed to StatusServer both for the initial frame (init=True) and for
        subsequent updates.
        """
        task_dict = self.task.to_dict()
        task_dict.update({
            "file_count": self.file_count,
            "file_complete": self.file_complete,
            "file_error": self.file_error,
            "count_size": self.count_size,
            "complete_size": self.complete_size,
            "progress": (self.complete_size + self.current_file_size) * 100 / self.count_size if self.count_size > 0 else 0,
        })
        return {
            "task": task_dict,
            "file_status_list": [{
                "source_path": file.src_file,
                "target_path": file.dst_file,
                "status": file.status,
                "progress": file.progress,
                "log": file.message,
            } for file in self.file_map.values()],
        }

    def start_server(self):
        """Start the status socket server in a daemon thread and arm cleanup."""
        t = threading.Thread(target=self._srv.start_server, args=(), daemon=True)
        t.start()
        register_cleanup(self._srv)

        def close():
            self._srv.stop()

        self._close_func = close

    def close(self):
        """Stop the status server if it was started."""
        if self._close_func is None:
            return
        self._close_func()

    def update_status(self):
        """Push the current snapshot to all connected status clients."""
        self._srv.update_status()

    def run(self):
        """Transfer every pending file, persisting progress to the DB and
        streaming it over the status socket as it goes."""
        self.task.status = "running"
        self.ft_db.update_task(self.task)
        self.start_server()

        pending_list = [file for file in self.file_map.values() if file.status == "pending"]
        for file in pending_list:
            if file.is_dir > 0:
                # Empty-directory entry: just ensure it exists on the target.
                exits, _ = self.dn.target_file_exits(file.dst_file)
                if exits:
                    file.progress = 100
                    file.status = "complete"
                    self.ft_db.update_file_transfer(file)
                    continue
                res, err = self.dn.create_dir(path=file.dst_file)
                if err:
                    file.progress = 0
                    file.status = "failed"
                    file.message = err
                else:
                    # The two node APIs report success differently: a truthy
                    # status flag or a numeric status of 0 both mean success.
                    if res.get("status", False) or res.get("status", -1) == 0:
                        file.progress = 100
                        file.status = "complete"
                    else:
                        file.progress = 0
                        file.status = "failed"
                        file.message = res["msg"]
                self.ft_db.update_file_transfer(file)
                continue

            file.status = "running"
            file.started_at = datetime.now()
            self.ft_db.update_file_transfer(file)

            def call_log(progress, log):
                # Progress callback invoked by the node transfer primitives.
                file.progress = progress
                self.current_file_size = file.file_size * progress // 100
                self.ft_db.update_file_transfer(file)
                self.update_status()

            if self.task.task_action == "upload":
                self.ft_db.update_file_transfer(file)
                res = self.dn.upload_file(
                    filename=file.src_file,
                    target_path=os.path.dirname(file.dst_file),
                    mode=self.task.default_mode,
                    call_log=call_log,
                )
            else:
                self.ft_db.update_file_transfer(file)
                res = self.sn.download_file(
                    filename=file.src_file,
                    target_path=os.path.dirname(file.dst_file),
                    mode=self.task.default_mode,
                    call_log=call_log,
                )

            self.current_file_size = 0

            # Transfer primitives return an error string on failure, falsy on success.
            if res:
                file.status = "failed"
                file.message = res
                self.file_error += 1
            else:
                file.status = "complete"
                file.progress = 100
                self.file_complete += 1
                self.complete_size += file.file_size

            self.ft_db.update_file_transfer(file)
            self.update_status()

        if self.file_error == 0:
            self.task.status = "complete"
        else:
            self.task.status = "failed"
        self.ft_db.update_task(self.task)
        self.update_status()
        # Give attached status clients a moment to receive the final frame.
        time.sleep(10)
        self.close()
|
||||
|
||||
|
||||
def run_file_transfer_task(task_id: int):
    """Build the Filetransfer worker for *task_id* and execute it to completion."""
    Filetransfer(task_id).run()
|
||||
|
||||
def task_running_log(task_id: int, call_log: Callable[[Union[str, dict]], None]):
    """Attach *call_log* to the status socket of a running transfer task.

    If the socket does not exist, *call_log* receives a single string message.
    """
    sock_path = "{}/task_{}".format(Filetransfer.SOCKET_FILE_DIR, task_id)
    if not os.path.exists(sock_path):
        call_log("The task status link does not exist")
        return
    StatusClient(sock_path, callback=call_log).connect()
|
||||
|
||||
def wait_running(task_id: int, timeout: float = 3.0) -> str:
    """Block until the task's status socket appears.

    :return: "" on success, an error message string on timeout.
    """
    sock_path = "{}/task_{}".format(Filetransfer.SOCKET_FILE_DIR, task_id)
    remaining = timeout
    while not os.path.exists(sock_path):
        if remaining <= 0:
            return "Task startup timeout"
        remaining -= 0.05
        time.sleep(0.05)
    return ""
|
||||
300
mod/project/node/filetransfer/socket_server.py
Normal file
300
mod/project/node/filetransfer/socket_server.py
Normal file
@@ -0,0 +1,300 @@
|
||||
import json
|
||||
import socket
|
||||
import struct
|
||||
import sys
|
||||
import threading
|
||||
import os
|
||||
import atexit
|
||||
from typing import Callable, Any, Union, Tuple, Optional, List
|
||||
|
||||
|
||||
class StatusServer:
    """Pushes process status to connected clients over a Unix-domain or TCP socket.

    Wire format: a 4-byte little-endian length header followed by a UTF-8
    JSON body.
    """

    def __init__(self, get_status_func: Callable[[bool], Any], server_address: Union[str, Tuple[str, int]]):
        """
        Initialize the server.
        :param get_status_func: returns the current status dict; called with
            init=True for the snapshot sent on connect, False for updates
        :param server_address: Unix socket file path (str) or (host, port) tuple (TCP)
        """
        self.get_status_func = get_status_func
        self.server_address = server_address
        self.clients: List[socket.socket] = []
        self.lock = threading.Lock()  # guards self.clients
        self.running = False
        self.server_socket = None

    def handle_client(self, client_socket):
        """Send the initial snapshot, register the client, then idle until it disconnects."""
        try:
            # Send the initial status frame.
            new_status = self.get_status_func(True)
            status_bytes = json.dumps(new_status).encode()
            packed_data = len(status_bytes).to_bytes(4, 'little') + status_bytes  # length header

            try:
                client_socket.sendall(packed_data)
            except Exception as e:
                print(f"Failed to send update to client: {e}")
                client_socket.close()
                return

            # Register for subsequent update_status() pushes.
            with self.lock:
                self.clients.append(client_socket)

            # Keep the connection open; recv doubles as a disconnect detector.
            while self.running:
                try:
                    data = client_socket.recv(1024)
                    if not data:
                        break
                except Exception:
                    # BUG FIX: was a bare `except:` which also swallowed
                    # SystemExit/KeyboardInterrupt raised in this thread.
                    break

        finally:
            # Close the connection and drop it from the client list.
            client_socket.close()
            with self.lock:
                if client_socket in self.clients:
                    self.clients.remove(client_socket)

    def start_server(self):
        """Bind the socket (Unix-domain or TCP) and accept clients until stop()."""
        self.running = True

        if isinstance(self.server_address, str):
            # Unix-domain socket: remove a stale socket file before binding.
            self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                os.unlink(self.server_address)
            except OSError:
                if os.path.exists(self.server_address):
                    raise
            self.server_socket.bind(self.server_address)
        else:
            # TCP socket
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.server_socket.bind(self.server_address)

        self.server_socket.listen(5)
        print(f"Server is listening on {self.server_address}...")

        try:
            self.running = True
            while self.running:
                client_socket, _ = self.server_socket.accept()
                print("Client connected")

                # One thread per client.
                client_thread = threading.Thread(target=self.handle_client, args=(client_socket,))
                client_thread.start()
        except KeyboardInterrupt:
            print("Shutting down server...")
        finally:
            self.stop()

    def stop(self):
        """Stop the server, close all client sockets and clean up resources."""
        if not self.running:
            return
        self.running = False

        with self.lock:
            for client in self.clients:
                client.close()
            self.clients.clear()

        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None

        # Remove the Unix socket file if one was created.
        if isinstance(self.server_address, str) and os.path.exists(self.server_address):
            os.remove(self.server_address)
            print(f"Socket file removed: {self.server_address}")

    def update_status(self, update_data: Optional[dict] = None):
        """Fetch the latest status (or use *update_data*) and push it to every client."""
        if not update_data:
            new_status = self.get_status_func(False)
        else:
            new_status = update_data
        status_bytes = json.dumps(new_status).encode()
        packed_data = len(status_bytes).to_bytes(4, 'little') + status_bytes  # length header

        with self.lock:
            # BUG FIX: the original removed dead clients from self.clients while
            # iterating the same list, which skips the next client after each
            # removal; iterate a snapshot and drop failures afterwards. The
            # noisy per-client debug prints were removed at the same time.
            dead = []
            for client in list(self.clients):
                try:
                    client.sendall(packed_data)
                except Exception as e:
                    print(f"Failed to send update to client: {e}")
                    client.close()
                    dead.append(client)
            for client in dead:
                if client in self.clients:
                    self.clients.remove(client)
|
||||
|
||||
|
||||
class StatusClient:
    """Receives length-prefixed JSON status frames from a StatusServer and
    dispatches each decoded dict to a callback on a background thread."""

    def __init__(self, server_address, callback=None):
        """
        Initialize the client.
        :param server_address: Unix-domain socket path (str) or TCP (host, port) tuple
        :param callback: called with a dict each time a status update arrives
        """
        self.server_address = server_address
        self.callback = callback
        self.sock: Optional[socket.socket] = None
        self.running = False
        self.receive_thread = None

    def connect(self):
        """Connect to the server and start the background receive loop."""
        if isinstance(self.server_address, str):
            print("Connecting to Unix socket...", self.server_address)
            # Unix-domain socket
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            self.sock.connect(self.server_address)
        else:
            # TCP socket
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.sock.connect(self.server_address)

        print("Connected to server.")

        # Start the receiver thread (daemon so it never blocks interpreter exit).
        self.running = True
        self.receive_thread = threading.Thread(target=self._receive_loop, daemon=True)
        self.receive_thread.start()

    def _receive_loop(self):
        # Frames are a 4-byte little-endian length header followed by a JSON body
        # (must match StatusServer's framing).
        buffer = b''
        while self.running:
            try:
                # Read the 4-byte length header.
                while len(buffer) < 4:
                    data = self.sock.recv(4)
                    if not data:
                        raise ConnectionResetError("Server closed the connection")
                    buffer += data
                length = int.from_bytes(buffer[:4], 'little')
                buffer = buffer[4:]

                # Read the complete payload.
                while len(buffer) < length:
                    data = self.sock.recv(length - len(buffer))
                    if not data:
                        raise ConnectionResetError("Server closed the connection")
                    buffer += data
                message = buffer[:length]
                buffer = buffer[length:]

                # Decode the JSON frame and hand it to the callback.
                status = json.loads(message.decode())
                if self.callback:
                    self.callback(status)
            except ConnectionResetError as e:
                print("Connection interrupted:", e)
                self.disconnect()
                break
            except json.JSONDecodeError as e:
                # A malformed frame is skipped; the connection stays open.
                print("JSON parsing failed:", e)
                continue
            except Exception as e:
                print("reception error:", e)
                self.disconnect()
                break

    def disconnect(self):
        """Disconnect from the server and release the socket."""
        self.running = False
        if self.sock:
            self.sock.close()
            self.sock = None
        print("Disconnected from server.")

    def stop(self):
        """Stop the client and wait for the receive thread to finish."""
        self.disconnect()
        if self.receive_thread and self.receive_thread.is_alive():
            self.receive_thread.join()
        print("Client stopped.")

    def wait_receive(self):
        # Block until the receive thread exits (i.e. the server closed the stream).
        if self.receive_thread and self.receive_thread.is_alive():
            self.receive_thread.join()
|
||||
|
||||
|
||||
# 注册退出清理钩子
|
||||
# Exit-time cleanup hook.
def register_cleanup(server_instance):
    """Stop *server_instance* automatically when the interpreter exits."""
    atexit.register(server_instance.stop)
|
||||
|
||||
# # 示例使用
|
||||
# if __name__ == '__main__' and "server" in sys.argv:
|
||||
#
|
||||
# import time
|
||||
#
|
||||
# # 模拟的状态存储
|
||||
# process_status = {
|
||||
# 'process1': 'running',
|
||||
# 'process2': 'stopped',
|
||||
# "big_data": "<AAAAAAAAAAAAAAAAAFFFFFFFFFFFFFFFFFFAAAAFFFFFFFFFFFFFFFAAAAAAAAAAAAAAAAAAAAAAAAA>"
|
||||
# }
|
||||
#
|
||||
# def get_status():
|
||||
# return process_status
|
||||
#
|
||||
# # 设置 Unix 域套接字地址
|
||||
# server_address = './socket_filetransfer.sock'
|
||||
#
|
||||
# # 创建服务端实例
|
||||
# server = StatusServer(get_status, server_address)
|
||||
# register_cleanup(server) # 注册退出时清理
|
||||
#
|
||||
# # 启动服务端线程
|
||||
# server_thread = threading.Thread(target=server.start_server)
|
||||
# server_thread.daemon = True
|
||||
# server_thread.start()
|
||||
#
|
||||
# # 模拟状态更新
|
||||
# try:
|
||||
# while True:
|
||||
# print(">>>>>>>change<<<<<<<<<<<<<<<")
|
||||
# time.sleep(5)
|
||||
# process_status['process1'] = 'stopped' if process_status['process1'] == 'running' else 'running'
|
||||
# server.update_status()
|
||||
#
|
||||
# time.sleep(5)
|
||||
# process_status['process2'] = 'running' if process_status['process2'] == 'stopped' else 'stopped'
|
||||
# server.update_status()
|
||||
# except KeyboardInterrupt:
|
||||
# pass
|
||||
#
|
||||
# # 示例使用
|
||||
# if __name__ == '__main__' and "client" in sys.argv:
|
||||
# # Unix 域套接字示例:
|
||||
# server_address = './socket_filetransfer.sock'
|
||||
#
|
||||
# # 示例回调函数
|
||||
# def on_status_update(status_dict):
|
||||
# print("[Callback] New status received:")
|
||||
# for k, v in status_dict.items():
|
||||
# print(f" - {k}: {v}")
|
||||
#
|
||||
#
|
||||
# client = StatusClient(server_address, callback=on_status_update)
|
||||
# try:
|
||||
# client.connect()
|
||||
#
|
||||
# # 主线程保持运行,防止程序退出
|
||||
# while client.running:
|
||||
# pass
|
||||
# except KeyboardInterrupt:
|
||||
# print("Client shutting down...")
|
||||
# client.stop()
|
||||
561
mod/project/node/nodeMod.py
Normal file
561
mod/project/node/nodeMod.py
Normal file
@@ -0,0 +1,561 @@
|
||||
import json
|
||||
import os.path
|
||||
import threading
|
||||
import traceback
|
||||
import public
|
||||
from mod.base import json_response
|
||||
from mod.base.ssh_executor import test_ssh_config
|
||||
try:
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, monitor_node_once_with_timeout
|
||||
except:
|
||||
# 定义处理h11的命令变量
|
||||
cmd_h11 = "cd /www/server/panel/pyenv/bin && source activate && H11_VERSION=$(./pip3 show h11 | grep -i Version | awk '{print $2}') && if [ \"$H11_VERSION\" != \"0.14.0\" ]; then ./pip3 uninstall h11 -y; fi; ./pip3 install h11==0.14.0"
|
||||
|
||||
# 定义处理wsproto的命令变量
|
||||
cmd_wsproto = "cd /www/server/panel/pyenv/bin && source activate && WSPROTO_VERSION=$(./pip3 show wsproto | grep -i Version | awk '{print $2}') && if [ \"$WSPROTO_VERSION\" != \"1.2.0\" ]; then ./pip3 uninstall wsproto -y; fi; ./pip3 install wsproto==1.2.0"
|
||||
public.ExecShell(cmd_h11)
|
||||
public.ExecShell(cmd_wsproto)
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, monitor_node_once_with_timeout
|
||||
from mod.project.node.dbutil import Node, ServerNodeDB, ServerMonitorRepo
|
||||
from mod.project.node.task_flow import flow_useful_version
|
||||
|
||||
|
||||
class main():
|
||||
    # Shared DB handle created once at class definition time; the raw SQLite
    # file path is exposed for callers that need it.
    node_db_obj = ServerNodeDB()
    node_db_file =node_db_obj._DB_FILE
    def __init__(self):
        """Set up per-instance marker-file paths under the panel data directory."""
        # self.node_db_obj = ServerNodeDB()
        # Marker file recording that the node module has been used.
        self.tip_file = public.get_panel_path() + "/data/mod_node_used.pl"
        # Marker file storing the preferred node list display mode.
        self.show_mode_file = public.get_panel_path() + "/data/mod_node_show_mode.pl"
||||
|
||||
def add_node(self, get):
|
||||
"""
|
||||
增加节点
|
||||
:param get: address节点地址 api_key节点API Key remarks节点备注 category_id节点分类ID
|
||||
:return:
|
||||
"""
|
||||
ssh_conf = get.get('ssh_conf', "{}")
|
||||
try:
|
||||
get.ssh_conf = json.loads(ssh_conf)
|
||||
except Exception:
|
||||
return public.return_message(-1, 0,"SSH_conf data format error")
|
||||
n, err = Node.from_dict(get)
|
||||
if not n:
|
||||
return public.return_message(-1, 0, err)
|
||||
public.set_module_logs("nodes_node_adds_9", "add_node")
|
||||
if n.app_key or n.api_key:
|
||||
err = ServerNode.check_api_key(n)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
else:
|
||||
# ssh 节点,不用处理
|
||||
pass
|
||||
|
||||
n.server_ip = n.parse_server_ip()
|
||||
err = ServerNodeDB().create_node(n)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
node = ServerNodeDB().get_node_by_id(n.id)
|
||||
if node:
|
||||
monitor_node_once_with_timeout(node)
|
||||
return public.return_message(0, 0, "Node added successfully")
|
||||
|
||||
@staticmethod
|
||||
def bind_app(get):
|
||||
n, err = Node.from_dict(get)
|
||||
if not n:
|
||||
return public.return_message(-1, 0, err)
|
||||
if not n.app_key:
|
||||
return public.return_message(-1, 0, "Please specify the app key to bind to")
|
||||
srv = ServerNode("", "", n.app_key)
|
||||
res = srv.app_bind()
|
||||
if res:
|
||||
return public.return_message(-1, 0, res)
|
||||
else:
|
||||
return public.return_message(0, 0, "Binding request has been sent out")
|
||||
|
||||
@staticmethod
|
||||
def bind_app_status(get):
|
||||
n, err = Node.from_dict(get)
|
||||
if not n:
|
||||
return public.return_message(-1, 0, err)
|
||||
if not n.app_key:
|
||||
return public.return_message(-1, 0, "Please specify the app key to bind to")
|
||||
srv = ServerNode("", "", n.app_key)
|
||||
res = srv.app_bind_status()
|
||||
if res:
|
||||
return public.return_message(-1, 0, res)
|
||||
else:
|
||||
return public.return_message(0, 0, "Binding successful")
|
||||
|
||||
|
||||
def del_node(self, get):
|
||||
"""
|
||||
删除节点
|
||||
:param get: ids节点ID
|
||||
:return:
|
||||
"""
|
||||
node_ids = get.get('ids', "")
|
||||
if not node_ids:
|
||||
return public.return_message(-1, 0, "Node ID cannot be empty, at least one")
|
||||
try:
|
||||
node_ids = json.loads(node_ids)
|
||||
if not isinstance(node_ids, list) and isinstance(node_ids, int):
|
||||
node_ids = [node_ids]
|
||||
except Exception:
|
||||
return public.return_message(-1, 0, "The format of the node ID data passed in is incorrect")
|
||||
|
||||
srv_db = ServerNodeDB()
|
||||
for node_id in node_ids:
|
||||
if srv_db.is_local_node(node_id):
|
||||
continue
|
||||
err = srv_db.delete_node(node_id)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
return public.return_message(0, 0, "Node deleted successfully")
|
||||
|
||||
def update_node(self, get):
|
||||
"""
|
||||
更新节点
|
||||
:param get: id节点ID address节点地址 api_key节点API Key remarks节点备注 category_id节点分类ID
|
||||
:return:
|
||||
"""
|
||||
node_id = get.get('id/d', 0)
|
||||
ssh_conf = get.get('ssh_conf', "{}")
|
||||
try:
|
||||
get.ssh_conf = json.loads(ssh_conf)
|
||||
except Exception:
|
||||
return public.return_message(-1, 0, "SSH_conf data format error")
|
||||
if not node_id:
|
||||
return public.return_message(-1, 0, "Node ID cannot be empty")
|
||||
n, err = Node.from_dict(get)
|
||||
if not n:
|
||||
return public.return_message(-1, 0, err)
|
||||
if n.app_key or n.api_key:
|
||||
err = ServerNode.check_api_key(n)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
|
||||
n.server_ip = n.parse_server_ip()
|
||||
srv_db = ServerNodeDB()
|
||||
err = srv_db.update_node(n, with_out_fields=["id", "status", "error", "error_num"])
|
||||
if err:
|
||||
return public.return_message(-1, 0,err)
|
||||
node = ServerNodeDB().get_node_by_id(n.id)
|
||||
if node:
|
||||
monitor_node_once_with_timeout(node)
|
||||
return public.return_message(0, 0, "Node update successful")
|
||||
|
||||
def default_show_mode(self) -> str:
|
||||
if not os.path.exists(self.show_mode_file):
|
||||
return "list"
|
||||
show_mode = public.readFile(self.show_mode_file)
|
||||
if not show_mode:
|
||||
return "list"
|
||||
if show_mode not in ["list", "block"]:
|
||||
return "list"
|
||||
return show_mode
|
||||
|
||||
def set_show_mode(self, mode_name: str):
|
||||
if mode_name not in ["list", "block"]:
|
||||
return False
|
||||
if mode_name == "block":
|
||||
public.set_module_logs("node_show_block", "node_show_block")
|
||||
public.writeFile(self.show_mode_file, mode_name)
|
||||
return True
|
||||
|
||||
def get_node_list(self, get):
|
||||
"""
|
||||
获取节点列表
|
||||
:param get: p页码 limit每页数量 search搜索关键字 category_id分类ID
|
||||
:return:
|
||||
"""
|
||||
page_num = max(int(get.get('p/d', 1)), 1)
|
||||
limit = max(int(get.get('limit/d', 10)), 10)
|
||||
search = get.get('search', "").strip()
|
||||
category_id = get.get('category_id/d', -1)
|
||||
refresh = get.get('refresh/s', "")
|
||||
show_mode = get.get('show_mode/s', "")
|
||||
if not show_mode or show_mode not in ["list", "block"]:
|
||||
show_mode = self.default_show_mode()
|
||||
else:
|
||||
if not self.set_show_mode(show_mode):
|
||||
show_mode = self.default_show_mode()
|
||||
|
||||
if show_mode == "block": # 返回所有数据
|
||||
page_num = 1
|
||||
limit = 9999999
|
||||
|
||||
srv_db = ServerNodeDB()
|
||||
data, err = srv_db.get_node_list(search, category_id, (page_num - 1) * limit, limit)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
|
||||
if refresh and refresh == "1":
|
||||
th_list = []
|
||||
for node in data:
|
||||
th = threading.Thread(target=monitor_node_once_with_timeout, args=(node,5))
|
||||
th.start()
|
||||
th_list.append(th)
|
||||
|
||||
for th in th_list:
|
||||
th.join()
|
||||
|
||||
for node in data:
|
||||
if isinstance(node["ssh_conf"], str):
|
||||
node["ssh_conf"] = json.loads(node["ssh_conf"])
|
||||
if isinstance(node["error"], str):
|
||||
node["error"] = json.loads(node["error"])
|
||||
if node["app_key"] == "local" and node["api_key"] == "local":
|
||||
node["address"] = public.getPanelAddr()
|
||||
if node["lpver"] and not node["remarks"].endswith(" | 1Panel"):
|
||||
node["remarks"] = node["remarks"] + " | 1Panel"
|
||||
node_data = self.get_node_data(node)
|
||||
node['data'] = node_data
|
||||
count = srv_db.node_count(search, category_id)
|
||||
page = public.get_page(count, page_num, limit)
|
||||
page["data"] = data
|
||||
page["show_mode"] = show_mode
|
||||
return public.return_message(0, 0,page)
|
||||
|
||||
@staticmethod
|
||||
def get_node_data(node: dict):
|
||||
if node["app_key"] == "local" and node["api_key"] == "local":
|
||||
data = ServerMonitorRepo.get_local_server_status()
|
||||
else:
|
||||
srv_m = ServerMonitorRepo()
|
||||
if srv_m.is_reboot_wait(node["server_ip"]):
|
||||
return {'status': 4, 'msg': "Server restart in progress..."}
|
||||
# public.print_log("get_node_data-------------------------1:{}".format(node["id"]))
|
||||
data = srv_m.get_server_status(node['id'])
|
||||
# public.print_log("get_node_data------------data----------2---:{}".format(data))
|
||||
if data:
|
||||
cpu_data = data.get('cpu', {})
|
||||
memory_data = data.get('mem', {})
|
||||
if cpu_data and memory_data:
|
||||
return {
|
||||
'status': 0,
|
||||
'cpu': cpu_data[0],
|
||||
'cpu_usage': cpu_data[1],
|
||||
'memory': round(float(memory_data['memRealUsed']) / float(memory_data['memTotal']) * 100, 2),
|
||||
'mem_usage': memory_data['memRealUsed'],
|
||||
'memNewTotal': memory_data.get('memNewTotal', "") or public.to_size(
|
||||
memory_data['memTotal'] * 1024 * 1024)
|
||||
}
|
||||
return {'status': 2, 'msg': "Failed to obtain node data"}
|
||||
|
||||
def add_category(self, get):
|
||||
"""
|
||||
添加分类
|
||||
:param get:
|
||||
:return:
|
||||
"""
|
||||
name = get.get('name', "").strip()
|
||||
srv_db = ServerNodeDB()
|
||||
if not name:
|
||||
return public.return_message(-1, 0, "Classification name cannot be empty")
|
||||
if srv_db.category_exites(name):
|
||||
return public.return_message(-1, 0, "The category name already exists")
|
||||
err = srv_db.create_category(name)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
return public.return_message(0, 0, "Category added successfully")
|
||||
|
||||
def del_category(self, get):
|
||||
"""
|
||||
删除分类
|
||||
:param get:
|
||||
:return:
|
||||
"""
|
||||
category_id = get.get('id/d', 0)
|
||||
if not category_id:
|
||||
return public.return_message(-1, 0, "Classification ID cannot be empty")
|
||||
srv_db = ServerNodeDB()
|
||||
if srv_db.category_exites(category_id):
|
||||
srv_db.delete_category(category_id)
|
||||
|
||||
return public.return_message(0, 0, "Category deleted successfully")
|
||||
|
||||
def bind_node_to_category(self, get):
|
||||
"""
|
||||
绑定节点到分类 可以批量绑定
|
||||
:param get: 如果传入单个node_id则是绑定单个,如果是传入列表则批量绑定
|
||||
:return:
|
||||
"""
|
||||
node_ids = get.get('ids', "")
|
||||
category_id = get.get('category_id/d', 0)
|
||||
try:
|
||||
node_ids = json.loads(node_ids)
|
||||
if not isinstance(node_ids, list) and isinstance(node_ids, int):
|
||||
node_ids = [node_ids]
|
||||
except Exception:
|
||||
return public.return_message(-1, 0, "Node ID format error")
|
||||
|
||||
if not node_ids:
|
||||
return public.return_message(-1, 0, "Node ID cannot be empty, at least one")
|
||||
|
||||
if category_id < 0:
|
||||
return public.return_message(-1, 0, "Classification ID cannot be empty")
|
||||
|
||||
srv_db = ServerNodeDB()
|
||||
err = srv_db.bind_category_to_node(node_ids, category_id)
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
return public.return_message(0, 0, "Node grouping modification successful")
|
||||
|
||||
def get_category_list(self, get):
|
||||
"""
|
||||
获取分类列表
|
||||
:param get:
|
||||
:return:
|
||||
"""
|
||||
try:
|
||||
categorys = public.S('category', self.node_db_obj._DB_FILE).select()
|
||||
return public.return_message(0, 0, categorys)
|
||||
except Exception:
|
||||
public.print_log(traceback.print_exc())
|
||||
return public.return_message(-1, 0, "Data query failed")
|
||||
|
||||
@staticmethod
|
||||
def get_panel_url(get):
|
||||
"""
|
||||
获取目标面板的访问url
|
||||
:param get: address节点地址 api_key节点API Key
|
||||
:return:
|
||||
"""
|
||||
node_id = get.get('node_id/d', 0)
|
||||
if not node_id:
|
||||
return public.return_message(-1, 0, "node_id cannot be empty")
|
||||
srv = ServerNode.new_by_id(node_id)
|
||||
if not srv:
|
||||
return public.return_message(-1, 0, "node_id does not exist")
|
||||
token, err = srv.get_tmp_token()
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
target_panel_url = srv.origin + "/login?tmp_token=" + token
|
||||
return public.return_message(0, 0, {'target_panel_url': target_panel_url})
|
||||
|
||||
@classmethod
|
||||
def get_all_node(cls, get):
|
||||
"""
|
||||
@route /mod/node/node/get_all_node
|
||||
@param query: str
|
||||
@return: [
|
||||
{
|
||||
"node_id": int,
|
||||
"remarks": str,
|
||||
"ip": str,
|
||||
}
|
||||
]
|
||||
"""
|
||||
query_type = get.get('node_type/s', "api")
|
||||
field_str = "id,remarks,server_ip,address,app_key,api_key,lpver,category_id,error_num,ssh_conf"
|
||||
if query_type == "api":
|
||||
data = public.S('node', cls.node_db_file).where("app_key != '' or api_key != ''", ()).field(field_str).select()
|
||||
elif query_type == "ssh":
|
||||
data = public.S('node', self.node_db_obj._DB_FILE).field(field_str).where("ssh_conf != '{}'", ()).select()
|
||||
elif query_type == "file_src":
|
||||
data = public.S('node', self.node_db_obj._DB_FILE).field(field_str).where(
|
||||
"(app_key != '' or api_key != '') and lpver = ''", ()).select()
|
||||
else: # all 除本机之外的节点
|
||||
data = public.S('node', self.node_db_obj._DB_FILE).where("api_key != 'local'", ()).field(field_str).select()
|
||||
|
||||
srv_cache = ServerMonitorRepo()
|
||||
for i in data:
|
||||
i["has_ssh"] = bool(json.loads(i["ssh_conf"]))
|
||||
i.pop("ssh_conf")
|
||||
i["is_local"] = (i["app_key"] == "local" and i["api_key"] == "local")
|
||||
i.pop("app_key")
|
||||
i.pop("api_key")
|
||||
if i["server_ip"] == "":
|
||||
server_ip = ServerNode.get_node_ip(i['id'])
|
||||
if server_ip:
|
||||
i["server_ip"] = server_ip
|
||||
if i["lpver"] and not i["remarks"].endswith(" | 1Panel"):
|
||||
i["remarks"] = i["remarks"] + " | 1Panel"
|
||||
|
||||
tmp_data = srv_cache.get_server_status(i['id'])
|
||||
tmp_data = tmp_data or {}
|
||||
if query_type == "file_src":
|
||||
if not tmp_data and not i["is_local"]:
|
||||
i["version"] = ""
|
||||
i["useful_version"] = True
|
||||
continue
|
||||
if not i["is_local"] or not flow_useful_version(tmp_data.get('version', "")):
|
||||
continue
|
||||
else:
|
||||
if not tmp_data:
|
||||
i["version"] = ""
|
||||
i["useful_version"] = True
|
||||
continue
|
||||
i['version'] = tmp_data.get('version', "")
|
||||
i['useful_version'] = cls._useful_version(i['version'])
|
||||
|
||||
return public.return_message(0, 0, data)
|
||||
|
||||
@staticmethod
|
||||
def _useful_version(ver: str):
|
||||
try:
|
||||
if ver == "1Panel":
|
||||
return True
|
||||
ver_list = [int(i) for i in ver.split(".")]
|
||||
if ver_list[0] >= 10:
|
||||
return True
|
||||
elif ver_list[0] == 9 and ver_list[1] >= 7:
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
return False
|
||||
|
||||
@staticmethod
|
||||
def get_node_sites(get):
|
||||
"""
|
||||
@route /mod/node/node/get_node_sites
|
||||
@param node_id: int
|
||||
@param query: str
|
||||
@return: [
|
||||
{
|
||||
"node_id": int,
|
||||
"site_id": int,
|
||||
"site_name": str,
|
||||
"site_port": int
|
||||
}
|
||||
]
|
||||
"""
|
||||
node_id = get.get('node_id/d', 0)
|
||||
if not node_id:
|
||||
return public.return_message(-1, 0, "node_id cannot be empty")
|
||||
srv = ServerNode.new_by_id(node_id)
|
||||
if not srv:
|
||||
return public.return_message(-1, 0, "node_id does not exist")
|
||||
data_list, err = srv.php_site_list()
|
||||
if err:
|
||||
return public.return_message(-1, 0, err)
|
||||
return public.return_message(0, 0, data_list)
|
||||
|
||||
@staticmethod
|
||||
def php_site_list(get):
|
||||
"""
|
||||
@route /mod/node/node/php_site_list
|
||||
@return: [
|
||||
{
|
||||
"site_id": int,
|
||||
"site_name": str,
|
||||
"ports": []int,
|
||||
"domains": []str,
|
||||
"ssl":bool
|
||||
}
|
||||
]
|
||||
"""
|
||||
return LocalNode().php_site_list()[0]
|
||||
|
||||
def node_used_status(self, get):
|
||||
if os.path.exists(self.tip_file):
|
||||
return public.return_message(0, 0, "Used")
|
||||
return public.return_message(-1, 0, "Unused")
|
||||
|
||||
def set_used_status(self, get):
|
||||
# 定义处理h11的命令变量
|
||||
cmd_h11 = "cd /www/server/panel/pyenv/bin && source activate && H11_VERSION=$(./pip3 show h11 | grep -i Version | awk '{print $2}') && if [ \"$H11_VERSION\" != \"0.14.0\" ]; then ./pip3 uninstall h11 -y; fi; ./pip3 install h11==0.14.0"
|
||||
|
||||
# 定义处理wsproto的命令变量
|
||||
cmd_wsproto = "cd /www/server/panel/pyenv/bin && source activate && WSPROTO_VERSION=$(./pip3 show wsproto | grep -i Version | awk '{print $2}') && if [ \"$WSPROTO_VERSION\" != \"1.2.0\" ]; then ./pip3 uninstall wsproto -y; fi; ./pip3 install wsproto==1.2.0"
|
||||
public.ExecShell(cmd_h11)
|
||||
public.ExecShell(cmd_wsproto)
|
||||
if os.path.exists(self.tip_file):
|
||||
os.remove(self.tip_file)
|
||||
else:
|
||||
public.set_module_logs("nodes_installed_9", "set_used_status")
|
||||
public.writeFile(self.tip_file, "True")
|
||||
return public.return_message(0, 0, "Setup successful")
|
||||
|
||||
|
||||
@staticmethod
|
||||
def remove_ssh_conf(get):
|
||||
node_id = get.get("node_id/d", 0)
|
||||
ServerNodeDB().remove_node_ssh_conf(node_id)
|
||||
return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
|
||||
def set_ssh_conf(get):
|
||||
"""设置ssh配置信息"""
|
||||
host = get.get("host/s", "")
|
||||
port = get.get("port/d", 22)
|
||||
username = get.get("username/s", "root")
|
||||
password = get.get("password/s", "")
|
||||
pkey = get.get("pkey/s", "")
|
||||
pkey_passwd = get.get("pkey_passwd/s", "")
|
||||
node_id = get.get("node_id/d", 0)
|
||||
test_case = get.get("test_case/d", 0)
|
||||
|
||||
if not node_id and not test_case:
|
||||
return public.return_message(-1, 0, "Node does not exist")
|
||||
|
||||
if not host and node_id:
|
||||
host = ServerNode.get_node_ip(node_id)
|
||||
if not username:
|
||||
username = "root"
|
||||
if not host or not username or not port:
|
||||
return public.return_message(-1, 0, "Host IP, host port, and user name cannot be empty")
|
||||
if not password and not pkey:
|
||||
return public.return_message(-1, 0, "Password or key cannot be empty")
|
||||
|
||||
res = test_ssh_config(host, port, username, password, pkey, pkey_passwd)
|
||||
if res:
|
||||
return public.return_message(-1, 0, res)
|
||||
if test_case:
|
||||
return public.return_message(0, 0, "Test successful")
|
||||
ServerNodeDB().set_node_ssh_conf(node_id, {
|
||||
"host": host,
|
||||
"port": port,
|
||||
"username": username,
|
||||
"password": password,
|
||||
"pkey": pkey,
|
||||
"pkey_passwd": pkey_passwd
|
||||
})
|
||||
return public.return_message(0, 0, "Setup successful")
|
||||
|
||||
@staticmethod
|
||||
def get_sshd_port(get):
|
||||
node_id = get.get("node_id/d", 0)
|
||||
srv = ServerNode.new_by_id(node_id)
|
||||
if not srv:
|
||||
return public.return_message(-1, 0, "Node does not exist")
|
||||
|
||||
port = srv.get_sshd_port()
|
||||
if not port:
|
||||
port = 22
|
||||
return public.return_message(0, 0, {"port": port})
|
||||
|
||||
|
||||
@staticmethod
|
||||
def restart_bt_panel(get):
|
||||
node_id = get.get("node_id/d", 0)
|
||||
srv = ServerNode.new_by_id(node_id)
|
||||
if not srv:
|
||||
return public.return_message(-1, 0, "Node does not exist")
|
||||
if srv.is_local:
|
||||
return public.return_message(-1, 0, "The local node does not support this operation")
|
||||
ret = srv.restart_bt_panel()
|
||||
if ret.get("status"):
|
||||
return public.return_message(0, 0, ret.get("msg"))
|
||||
else:
|
||||
return public.return_message(-1, 0, ret.get("msg"))
|
||||
|
||||
|
||||
@staticmethod
|
||||
def server_reboot(get):
|
||||
node_id = get.get("node_id/d", 0)
|
||||
srv = ServerNode.new_by_id(node_id)
|
||||
if not srv:
|
||||
return public.return_message(-1, 0, "Node does not exist")
|
||||
if srv.is_local:
|
||||
return public.return_message(-1, 0, "The local node does not support this operation")
|
||||
repo = ServerMonitorRepo()
|
||||
if repo.is_reboot_wait(srv.node_server_ip):
|
||||
return public.return_message(-1, 0, "Node is restarting, please try again later")
|
||||
ret = srv.server_reboot()
|
||||
if ret.get("status"):
|
||||
return public.return_message(0, 0, ret.get("msg"))
|
||||
else:
|
||||
return public.return_message(-1, 0, ret.get("msg"))
|
||||
11
mod/project/node/nodeutil/__init__.py
Normal file
11
mod/project/node/nodeutil/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
from .base import *
|
||||
from .ssh_wrap import SSHApi
|
||||
|
||||
__all__ = [
|
||||
"ServerNode",
|
||||
"LocalNode",
|
||||
"LPanelNode",
|
||||
"monitor_node_once_with_timeout",
|
||||
"monitor_node_once",
|
||||
"SSHApi"
|
||||
]
|
||||
1961
mod/project/node/nodeutil/base.py
Normal file
1961
mod/project/node/nodeutil/base.py
Normal file
File diff suppressed because it is too large
Load Diff
573
mod/project/node/nodeutil/one_panel_api.py
Normal file
573
mod/project/node/nodeutil/one_panel_api.py
Normal file
@@ -0,0 +1,573 @@
|
||||
import os.path
|
||||
import shutil
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
import requests
|
||||
import time
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Optional, List, Any, Tuple, Dict
|
||||
|
||||
|
||||
class OnePanelApiClient:
|
||||
def __init__(self, panel_address, api_key, ver: str = "v2", timeout: int = 20):
|
||||
"""
|
||||
初始化 OnePanel API 客户端
|
||||
|
||||
Args:
|
||||
panel_address (str): 1Panel 的访问地址 (例如: "http://your_server_ip:4004")
|
||||
api_key (str): 您的 1Panel API Key
|
||||
"""
|
||||
self.panel_address = panel_address
|
||||
self.api_key = api_key
|
||||
self.ver = ver
|
||||
self.timeout = timeout
|
||||
self._call_err: Optional[Exception] = None
|
||||
|
||||
def _generate_token(self):
|
||||
"""生成 1Panel API token 和时间戳"""
|
||||
timestamp = str(int(time.time()))
|
||||
sign_string = f"1panel{self.api_key}{timestamp}"
|
||||
md5_hash = hashlib.md5(sign_string.encode()).hexdigest()
|
||||
return md5_hash, timestamp
|
||||
|
||||
def _call_api(self, method, endpoint, json_data=None):
|
||||
"""发送 API 请求"""
|
||||
token, timestamp = self._generate_token()
|
||||
headers = {
|
||||
"1Panel-Token": token,
|
||||
"1Panel-Timestamp": timestamp,
|
||||
"Content-Type": "application/json"
|
||||
}
|
||||
url = "{}{}".format(self.panel_address, endpoint)
|
||||
|
||||
# print(f"Calling API: {method} {url}")
|
||||
try:
|
||||
response = requests.request(method, url, headers=headers, json=json_data, timeout=self.timeout)
|
||||
response.raise_for_status() # 检查 HTTP 错误 (例如 4xx 或 5xx)
|
||||
print(f"API Response Status: {response.status_code}")
|
||||
return response.json()
|
||||
except requests.exceptions.RequestException as e:
|
||||
self._call_err = e
|
||||
print(f"API call failed: {e}")
|
||||
return None
|
||||
except Exception as e:
|
||||
self._call_err = e
|
||||
print(f"API call failed: {e}")
|
||||
return None
|
||||
|
||||
def add_website(self, site_name: str, port: int, **kwargs):
|
||||
"""
|
||||
添加网站
|
||||
"""
|
||||
endpoint = "/api/{}/websites".format(self.ver)
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"primaryDomain": site_name,
|
||||
"type": "static",
|
||||
"alias": site_name,
|
||||
"remark": kwargs.get("ps") if kwargs.get("ps", None) else "Pagoda yakpanel load balancing station",
|
||||
"appType": "installed",
|
||||
"webSiteGroupId": 1,
|
||||
"otherDomains": "",
|
||||
"proxy": "",
|
||||
"appinstall": {
|
||||
"appId": 0,
|
||||
"name": "",
|
||||
"appDetailId": 0,
|
||||
"params": {},
|
||||
"version": "",
|
||||
"appkey": "",
|
||||
"advanced": False,
|
||||
"cpuQuota": 0,
|
||||
"memoryLimit": 0,
|
||||
"memoryUnit": "MB",
|
||||
"containerName": "",
|
||||
"allowPort": False
|
||||
},
|
||||
"IPV6": False,
|
||||
"enableFtp": False,
|
||||
"ftpUser": "",
|
||||
"ftpPassword": "",
|
||||
"proxyType": "tcp",
|
||||
"port": 9000,
|
||||
"proxyProtocol": "http://",
|
||||
"proxyAddress": "",
|
||||
"runtimeType": "php",
|
||||
"taskID": str(uuid4()),
|
||||
"createDb": False,
|
||||
"dbName": "",
|
||||
"dbPassword": "",
|
||||
"dbFormat": "utf8mb4",
|
||||
"dbUser": "",
|
||||
"dbType": "mysql",
|
||||
"dbHost": "",
|
||||
"enableSSL": False,
|
||||
"domains": [
|
||||
{
|
||||
"domain": site_name,
|
||||
"port": port,
|
||||
"ssl": False
|
||||
}
|
||||
],
|
||||
"siteDir": ""
|
||||
})
|
||||
|
||||
def check_site_create(self, site_name: str) -> Optional[int]:
|
||||
endpoint = "/api/{}/websites/search".format(self.ver)
|
||||
res_data = self._call_api("POST", endpoint, json_data={
|
||||
"name": site_name,
|
||||
"page": 1,
|
||||
"pageSize": 10,
|
||||
"orderBy": "favorite",
|
||||
"order": "descending",
|
||||
"websiteGroupId": 0,
|
||||
"type": "static"
|
||||
})
|
||||
|
||||
if res_data is not None and "data" in res_data and isinstance(res_data["data"], dict):
|
||||
for item in res_data["data"].get("items", {}):
|
||||
if item["alias"] == site_name:
|
||||
return item["id"]
|
||||
return None
|
||||
|
||||
def get_websites(self):
|
||||
"""
|
||||
获取所有网站信息
|
||||
|
||||
Returns:
|
||||
dict: API 返回结果 (网站列表),失败返回 None
|
||||
"""
|
||||
# 示例接口路径,请根据您的 Swagger 文档修改
|
||||
endpoint = "/api/{}/websites/list".format(self.ver)
|
||||
return self._call_api("GET", endpoint)
|
||||
|
||||
def add_website_domain(self, website_id: int, new_domain: str, port: int):
|
||||
"""
|
||||
设置网站域名
|
||||
"""
|
||||
# 示例接口路径和参数,请根据您的 Swagger 文档修改
|
||||
endpoint = "/api/{}/websites/domains".format(self.ver)
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"websiteID": website_id,
|
||||
"domains": [
|
||||
{
|
||||
"domain": new_domain,
|
||||
"port": port,
|
||||
"ssl": False
|
||||
}
|
||||
],
|
||||
"domainStr": ""
|
||||
})
|
||||
|
||||
def website_domains(self, website_id: int):
|
||||
"""
|
||||
获取网站域名列表
|
||||
"""
|
||||
endpoint = "/api/{}/websites/domains/{website_id}".format(self.ver, website_id=website_id)
|
||||
return self._call_api("GET", endpoint)
|
||||
|
||||
def list_file_test(self):
|
||||
endpoint = "/api/{}/files/search".format(self.ver)
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"containSub": False,
|
||||
"dir": True,
|
||||
"expand": True,
|
||||
"isDetail": True,
|
||||
"page": 0,
|
||||
"pageSize": 0,
|
||||
"path": "/",
|
||||
"search": "",
|
||||
"showHidden": True,
|
||||
"sortBy": "",
|
||||
"sortOrder": ""
|
||||
})
|
||||
|
||||
def list_file(self, path: str) -> Tuple[List[Dict], str]:
|
||||
endpoint = "/api/{}/files/search".format(self.ver)
|
||||
res = self._call_api("POST", endpoint, json_data={
|
||||
"containSub": False,
|
||||
"expand": True,
|
||||
"isDetail": True,
|
||||
"page": 1,
|
||||
"pageSize": 1000,
|
||||
"path": path,
|
||||
"search": "",
|
||||
"showHidden": True,
|
||||
"sortBy": "name",
|
||||
"sortOrder": "ascending"
|
||||
})
|
||||
if res is None:
|
||||
return [], "Failed to retrieve file list"
|
||||
if res["code"] != 200:
|
||||
return [], res["message"]
|
||||
if res["data"]["itemTotal"] > 1000:
|
||||
return [], "The number of directory files exceeds 1000, please compress before operating"
|
||||
elif res["data"]["itemTotal"] == 0:
|
||||
return [], ""
|
||||
return [] if res["data"]["items"] is None else res["data"]["items"], ""
|
||||
|
||||
def files_search(self, path: str, page: int, page_size: int, search: str):
|
||||
endpoint = "/api/{}/files/search".format(self.ver)
|
||||
res = self._call_api("POST", endpoint, json_data={
|
||||
"containSub": False,
|
||||
"expand": True,
|
||||
"isDetail": True,
|
||||
"page": page,
|
||||
"pageSize": page_size,
|
||||
"path": path,
|
||||
"search": search,
|
||||
"showHidden": True,
|
||||
"sortBy": "name",
|
||||
"sortOrder": "ascending"
|
||||
})
|
||||
if res is None:
|
||||
return {}, "Failed to retrieve file list"
|
||||
elif res["code"] != 200:
|
||||
return {}, res["message"]
|
||||
return res["data"], ""
|
||||
|
||||
def test_ver(self) -> bool:
|
||||
self.ver = "v2"
|
||||
self._call_err = None
|
||||
res_data = self.list_file_test()
|
||||
if res_data is None and isinstance(self._call_err, json.JSONDecodeError):
|
||||
self.ver = "v1"
|
||||
res_data = self.list_file_test()
|
||||
if isinstance(res_data, dict):
|
||||
return True
|
||||
elif isinstance(res_data, dict):
|
||||
return True
|
||||
return False
|
||||
|
||||
def system_status(self):
|
||||
endpoint = "/api/{}/dashboard/current".format(self.ver)
|
||||
if self.ver == "v1":
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"scope": "basic",
|
||||
"ioOption": "all",
|
||||
"netOption": "all"
|
||||
})
|
||||
else:
|
||||
return self._call_api("GET", endpoint + "/all/all")
|
||||
|
||||
def open_port(self, port: int, protocol: str):
|
||||
endpoint = "/api/{}/hosts/firewall/port".format(self.ver)
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"protocol": protocol,
|
||||
"source": "anyWhere",
|
||||
"strategy": "accept",
|
||||
"port": str(port),
|
||||
"description": "aaaa",
|
||||
"operation": "add",
|
||||
"address": ""
|
||||
})
|
||||
|
||||
def ws_shell(self, work_dir: str, cmd: str) -> Optional[str]:
|
||||
import websocket
|
||||
import base64
|
||||
import threading
|
||||
from urllib.parse import urlencode, urlparse
|
||||
if self.ver != "v2":
|
||||
return None
|
||||
try:
|
||||
pre_command = "PS1="" && stty -echo && clear && cd {}".format(work_dir, cmd)
|
||||
p = {
|
||||
"cols": 80,
|
||||
"rows": 24,
|
||||
"command": pre_command,
|
||||
"operateNode": "local"
|
||||
}
|
||||
token, timestamp = self._generate_token()
|
||||
u = urlparse(self.panel_address)
|
||||
url = ("{}://{}/api/{}/hosts/terminal?{}".format
|
||||
("ws" if u.scheme == "http" else "wss", u.netloc, self.ver, urlencode(p)))
|
||||
ws = websocket.WebSocket()
|
||||
ws.connect(url, header={"1Panel-Token": token, "1Panel-Timestamp": timestamp, })
|
||||
if not cmd.endswith("\n"):
|
||||
cmd += "\n"
|
||||
ws.send(json.dumps({"type": "cmd", "data": base64.b64encode(cmd.encode("utf-8")).decode("utf-8")}))
|
||||
res_str = ""
|
||||
|
||||
wait = False
|
||||
|
||||
def close_timeout():
|
||||
time.sleep(5)
|
||||
if wait:
|
||||
ws.close()
|
||||
|
||||
threading.Thread(target=close_timeout).start()
|
||||
|
||||
while True:
|
||||
wait = True
|
||||
result = ws.recv()
|
||||
wait = False
|
||||
if result == "":
|
||||
break
|
||||
res_data = json.loads(result)
|
||||
if res_data["type"] == "cmd":
|
||||
res_str += base64.b64decode(res_data["data"]).decode("utf-8")
|
||||
|
||||
if pre_command in res_str:
|
||||
res_str = res_str[res_str.index(pre_command) + len(pre_command):]
|
||||
|
||||
res_str = res_str.strip()
|
||||
real_data = []
|
||||
for line in res_str.split("\r\n"):
|
||||
if line[0] == '\x1b':
|
||||
continue
|
||||
real_data.append(line)
|
||||
|
||||
real_data = "\n".join(real_data)
|
||||
with open("test.txt", "w") as f:
|
||||
f.write(real_data)
|
||||
return real_data
|
||||
except Exception as e:
|
||||
print("error:{}".format(str(e)))
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
def chunkupload(self,
|
||||
upload_name: str,
|
||||
target_path: str,
|
||||
chunk: Any, chunk_index: int, chunk_count: int) -> Tuple[str, Optional[dict]]:
|
||||
token, timestamp = self._generate_token()
|
||||
header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
|
||||
files = {'chunk': ("chunk", chunk, 'application/octet-stream')}
|
||||
data = {
|
||||
'path': target_path,
|
||||
'filename': upload_name,
|
||||
'chunkIndex': chunk_index,
|
||||
'chunkCount': chunk_count,
|
||||
}
|
||||
url = "{}/api/{}/files/chunkupload".format(self.panel_address, self.ver)
|
||||
try:
|
||||
resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
|
||||
if not resp.status_code == 200:
|
||||
return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
|
||||
resp.status_code, resp.text), None
|
||||
|
||||
return "", None if len(resp.text) < 3 else json.loads(resp.text)
|
||||
except Exception as e:
|
||||
return "Upload file: {} failed with error message:{}".format(upload_name, str(e)), None
|
||||
|
||||
def upload(self, filename: str, target_path: str, upload_name: str) -> str:
|
||||
token, timestamp = self._generate_token()
|
||||
header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
|
||||
try:
|
||||
with open(filename, 'rb') as f:
|
||||
file_data = f.read()
|
||||
except Exception as e:
|
||||
return "File {} failed to open, please check file permissions, error message is:{}".format(filename, str(e))
|
||||
|
||||
files = {'file': (upload_name, file_data, 'application/octet-stream')}
|
||||
data = {
|
||||
'path': target_path,
|
||||
'overwrite': True
|
||||
}
|
||||
url = "{}/api/{}/files/upload".format(self.panel_address, self.ver)
|
||||
try:
|
||||
resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
|
||||
if not resp.status_code == 200:
|
||||
return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
|
||||
resp.status_code, resp.text)
|
||||
if not resp.json()["code"] == 200:
|
||||
return "Upload file failed with error message:{}".format(resp.json()["message"])
|
||||
return ""
|
||||
except Exception as e:
|
||||
return "Upload file: {} failed with error message:{}".format(filename, str(e))
|
||||
|
||||
def files_exits(self, paths: List[str]) -> Optional[dict]:
|
||||
endpoint = "/api/{}/files/batch/check".format(self.ver)
|
||||
return self._call_api("POST", endpoint, json_data={
|
||||
"paths": paths,
|
||||
})
|
||||
|
||||
def download_file(self, filename: str, target_path: str, download_name: str, **kwargs) -> str:
    """Download a remote file from the node into a local directory, in 5 MiB chunks.

    The data is written to a uuid-suffixed temporary file and moved into place
    only after the size check passes, so a failed download never leaves a
    partial file under the final name.

    Fixes vs original: removed a leftover debug ``print`` on the size-mismatch
    path, and removed a redundant second ``os.makedirs`` check (the directory
    is already ensured above).

    :param filename: absolute path of the file on the node
    :param target_path: local directory to place the file in (created if missing)
    :param download_name: local file name to save as
    :param kwargs: may contain ``call_log``, a callable(progress, message) invoked per chunk
    :return: "" on success, otherwise a human-readable error message
    """
    data = self.files_exits([filename])
    file_size: Optional[int] = None
    if not isinstance(data, dict):
        return "Request file: {} status failed".format(filename)
    for i in data["data"]:
        if i["path"] == filename:
            file_size = i["size"]
            break
    if file_size is None:
        return "File {} does not exist, skip download".format(filename)

    try:
        if not os.path.isdir(target_path):
            os.makedirs(target_path)
    except Exception as e:
        return "Failed to create folder {}, please check folder permissions, error message is:{}".format(target_path, str(e))

    if file_size == 0:
        # Remote file is empty: just create an empty local file and stop.
        open(os.path.join(target_path, download_name), "w").close()
        return ""

    tmp_file = os.path.join(target_path, "{}.{}".format(download_name, uuid4().hex))
    try:
        fb = open(tmp_file, 'wb')
    except Exception as e:
        return "Failed to create temporary file {}, please check folder permissions, error message is:{}".format(tmp_file, str(e))

    call_log = lambda *args, **keyword_args: None
    if "call_log" in kwargs and callable(kwargs["call_log"]):
        call_log = kwargs["call_log"]
    try:
        for i in range(0, file_size, 1024 * 1024 * 5):
            start = i
            end = min(i + 1024 * 1024 * 5 - 1, file_size - 1)
            url = "{}/api/{}/files/chunkdownload".format(self.panel_address, self.ver)
            data = {
                'path': filename,
                'name': os.path.basename(filename),
            }
            token, timestamp = self._generate_token()
            header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
            header.update({"Range": "bytes={}-{}".format(start, end)})
            resp = requests.post(url, json=data, headers=header, verify=False, stream=True, timeout=self.timeout)
            # Partial Content is the only acceptable status for a ranged download.
            if resp.status_code != 206:
                return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the response header for the returned message is:{}".format(
                    resp.status_code, resp.headers)
            fb.write(resp.content)
            # NOTE(review): end // file_size is 0 for every chunk except when the file
            # is exactly one byte; presumably a percentage (end * 100 // file_size)
            # was intended — confirm with call_log consumers before changing.
            call_log(end // file_size, "File Download:{} -> {}, Downloaded size:{}".format(filename, target_path, end))
        fb.flush()
        if fb.tell() != file_size:
            return "Download file {} failed with error message:{}".format(filename, "files are different sizes")
        else:
            fb.close()
            shutil.move(tmp_file, os.path.join(target_path, download_name))
            return ""
    except Exception as e:
        return "Download file {} failed with error message:{}".format(filename, str(e))
    finally:
        # On any failure path the temp file is still present: close and remove it.
        if not fb.closed:
            fb.close()
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
|
||||
|
||||
def dir_walk(self, path: str) -> Tuple[List[dict], str]:
    """Breadth-first walk of a remote directory tree via ``self.list_file``.

    :param path: root directory on the node
    :return: (entries, error). Entries list empty directories first
             (``is_dir`` 1, size 0) followed by files (``is_dir`` 0).
             Aborts with an error once more than 1000 entries are seen.
    """
    pending = [path]
    files_found = []
    empty_dirs = []
    total = 0
    while pending:
        current = pending.pop(0)
        try:
            entries, err = self.list_file(current)
        except Exception as e:
            return [], str(e)
        if err:
            return [], err
        if not entries:
            empty_dirs.append(current)
        for entry in entries:
            if entry["isDir"]:
                pending.append(entry["path"])
            else:
                files_found.append({
                    "path": entry["path"],
                    "size": entry["size"],
                    "is_dir": 0
                })
            total += 1
            if total > 1000:
                return [], "The number of directory files exceeds 1000, please compress before operating"

    return [{"path": d, "size": 0, "is_dir": 1} for d in empty_dirs] + files_found, ""
|
||||
|
||||
def remove_file(self, path: str, is_dir: bool) -> str:
    """Delete a file or directory on the node (non-forced delete).

    :param path: absolute path on the node
    :param is_dir: True when the target is a directory
    :return: the API response
    """
    payload = {
        "isDir": is_dir,
        "path": path,
        "forceDelete": False
    }
    return self._call_api("POST", "/api/{}/files/del".format(self.ver), json_data=payload)
|
||||
|
||||
def download_proxy(self, filename: str):
    """Proxy a file download from the node to the current Flask client.

    Streams the node's response through in 5 MiB chunks.

    :param filename: absolute path of the file on the node
    :return: a Flask ``Response`` on success, otherwise an error string
    """
    try:
        url = "{}/api/{}/files/download".format(self.panel_address, self.ver)
        token, timestamp = self._generate_token()
        header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
        params = {
            "operateNode": "local",
            "path": filename
        }
        resp = requests.get(url, params=params, headers=header, stream=True, verify=False, timeout=self.timeout)
        if resp.status_code != 200:
            return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                resp.status_code, resp.text)

        from flask import send_file, stream_with_context, Response

        # Prefer the name advertised by the node, fall back to the path basename.
        filename = os.path.basename(filename)
        disposition = resp.headers.get("Content-Disposition", "")
        if disposition.find("filename=") != -1:
            filename = disposition.split("filename=")[1]

        def generate():
            for chunk in resp.iter_content(chunk_size=1024 * 1024 * 5):
                if chunk:
                    yield chunk

        # Build the response headers for the client.
        headers = {
            'Content-Type': resp.headers.get('Content-Type', 'application/octet-stream'),
            'Content-Disposition': 'attachment; filename="{}"'.format(filename),
            'Content-Length': resp.headers.get('Content-Length', ''),
            'Accept-Ranges': 'bytes'
        }

        # stream_with_context keeps the request context alive while the generator runs.
        return Response(
            stream_with_context(generate()),
            headers=headers,
            direct_passthrough=True
        )
    except Exception:
        return "Download file: {} failed with error message:{}".format(filename, traceback.format_exc())
|
||||
|
||||
def dir_size(self, path: str):
    """Ask the node for the total size of a directory.

    :param path: absolute directory path on the node
    :return: the API response
    """
    endpoint = "/api/{}/files/size".format(self.ver)
    return self._call_api("POST", endpoint, json_data={"path": path})
|
||||
|
||||
def get_sshd_config(self) -> Optional[dict]:
    """Fetch the node's sshd configuration.

    :return: the ``data`` dict on success (empty dict when absent),
             None on any failure.
    """
    res = self._call_api("POST", "/api/{}/hosts/ssh/search".format(self.ver))
    if res is None or res["code"] != 200:
        return None
    return res.get("data", {})
|
||||
|
||||
def create_dir(self, path: str):
    """Create a directory on the node via the files API.

    :param path: absolute directory path to create
    :return: the API response
    """
    payload = {
        "content": "",
        "isDir": True,
        "isLink": False,
        "isSymlink": False,
        "linkPath": "",
        "mode": 0,
        "path": path,
        "sub": False
    }
    # Third argument passed positionally, matching the original call shape.
    return self._call_api("POST", "/api/{}/files".format(self.ver), payload)
|
||||
|
||||
def restart_panel(self):
    """Restart the 1Panel service on the node."""
    endpoint = "/api/{}/dashboard/system/restart/{}".format(self.ver, "1panel")
    return self._call_api("POST", endpoint)
|
||||
|
||||
def server_reboot(self):
    """Reboot the node's operating system."""
    endpoint = "/api/{}/dashboard/system/restart/{}".format(self.ver, "system")
    return self._call_api("POST", endpoint)
|
||||
|
||||
def get_file_body(self, path: str) -> Tuple[Optional[dict], str]:
    """Fetch file content (first page, 100 lines) from the node.

    :param path: absolute file path on the node
    :return: (data, "") on success, (None, error_message) on failure
    """
    payload = {
        "path": path,
        "expand": True,
        "isDetail": False,
        "page": 1,
        "pageSize": 100
    }
    res = self._call_api("POST", "/api/{}/files/content".format(self.ver), json_data=payload)
    if res is None:
        return None, "Failed to retrieve file content"
    if res["code"] != 200:
        return None, res.get("message")
    return res.get("data", {}), ""
|
||||
129
mod/project/node/nodeutil/rsync_api.py
Normal file
129
mod/project/node/nodeutil/rsync_api.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import json
|
||||
from typing import Optional, Union, Tuple, List, Any, Dict
|
||||
|
||||
from .base import ServerNode, LocalNode
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class _RsyncAPIBase:
|
||||
|
||||
def has_rsync_perm(self) -> bool:
|
||||
raise NotImplementedError()
|
||||
|
||||
def is_setup_rsync(self) -> bool:
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
|
||||
raise NotImplementedError()
|
||||
|
||||
def add_send_task(self, sou):
|
||||
pass
|
||||
|
||||
def get_secretkey(self, ip_type: str = "local_ip") -> Tuple[str, str]:
|
||||
pass
|
||||
|
||||
def check_receiver_conn(self, secret_key: str, work_type: int) -> Tuple[Dict, str]:
|
||||
pass
|
||||
|
||||
|
||||
|
||||
class BtLocalRsyncAPI(LocalNode, _RsyncAPIBase):
    """rsync API backend for the local panel, driven through the rsync plugin."""

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtLocalRsyncAPI']:
        """Build a local backend for ``node_id``, or None if the node is not local.

        Fix: the original referenced ``self`` inside a ``@classmethod``, which is
        a guaranteed NameError at call time; ``cls`` is used instead.
        """
        # assumes node_db_obj is a class-level attribute on LocalNode — TODO confirm
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None

        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return BtLocalRsyncAPI()

        return None

    @staticmethod
    def _plugin_func(func_name: str, **kwargs) -> Any:
        """Invoke an rsync plugin action by name with the given arguments."""
        from panelPlugin import panelPlugin
        return panelPlugin().a(public.to_dict_obj({
            "name": "rsync",
            "s": func_name,
            **kwargs,
        }))

    def has_rsync_perm(self) -> bool:
        """True if the rsync plugin reports an authorized status."""
        from panelPlugin import panelPlugin
        res = panelPlugin().a(public.to_dict_obj({"name": "rsync"}))
        if not res["status"]:
            return False
        return True

    def is_setup_rsync(self) -> bool:
        """True if the rsync plugin is installed locally."""
        from panelPlugin import panelPlugin
        res = panelPlugin().get_soft_find(public.to_dict_obj({"sName": "rsync"}))
        try:
            return res["setup"]
        except Exception:
            # Missing/odd plugin response is treated as "not installed".
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module on the local panel.

        :return: (plugin result, "") — errors are reported inside the result.
        """
        res = self._plugin_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
        return res, ""
|
||||
|
||||
|
||||
class BtRsyncAPI(ServerNode, _RsyncAPIBase):
    """rsync API backend for a remote panel node, driven over the node HTTP API."""

    def _plugin_api_func(self, func_name: str, **kwargs) -> Tuple[Any, str]:
        """Invoke an rsync plugin action on the remote node."""
        return self._request("/plugin", "a", pdata={
            "name": "rsync",
            "s": func_name,
            **kwargs
        })

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtRsyncAPI']:
        """Build a remote backend for ``node_id``.

        Returns None when the node record is missing, the node is the local
        panel, or it is a 1Panel node (``lpver`` set).

        Fix: the original referenced ``self`` inside a ``@classmethod``, which
        is a guaranteed NameError at call time; ``cls`` is used instead.
        """
        # assumes node_db_obj is a class-level attribute on ServerNode — TODO confirm
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None

        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return None

        if node_data['lpver']:
            return None

        return BtRsyncAPI(node_data["address"], node_data["api_key"], "")

    def has_rsync_perm(self) -> bool:
        """True if the remote rsync plugin reports an authorized status."""
        data, err = self._request("/plugin", "a", pdata={"name": "rsync"})
        if err:
            return False
        return data["status"]

    def is_setup_rsync(self) -> bool:
        """True if the rsync plugin is installed on the remote node."""
        data, err = self._request("/plugin", "get_soft_find", pdata={"sName": "rsync"})
        if err:
            return False
        try:
            return data["setup"]
        except Exception:
            # Missing/odd plugin response is treated as "not installed".
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module on the remote node."""
        return self._plugin_api_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
|
||||
|
||||
|
||||
|
||||
def get_rsync_api_node(node_id: int) -> Optional[Union['BtRsyncAPI', 'BtLocalRsyncAPI']]:
    """Resolve the rsync backend for a node: local panel first, then remote.

    :param node_id: primary key of the node record
    :return: a backend instance, or None when no backend matches
    """
    local_backend = BtLocalRsyncAPI.new_by_id(node_id)
    if local_backend:
        return local_backend
    return BtRsyncAPI.new_by_id(node_id)
|
||||
783
mod/project/node/nodeutil/ssh_warp_scripts/system_info.sh
Normal file
783
mod/project/node/nodeutil/ssh_warp_scripts/system_info.sh
Normal file
@@ -0,0 +1,783 @@
|
||||
#!/bin/bash

# Ensure the output is pure JSON with no locale-dependent extra text
export LANG=C
export LC_ALL=C

# Per-user temp file holding the previous network sample (used for rate calculation)
NETWORK_DATA_FILE="/tmp/system_network_data_$(id -u).json"
|
||||
|
||||
# 收集网络接口数据并计算速率
|
||||
collect_network() {
|
||||
result="{"
|
||||
first=true
|
||||
current_time=$(date +%s)
|
||||
|
||||
# 读取之前的数据(如果存在)
|
||||
prev_data=""
|
||||
prev_time=0
|
||||
if [ -f "$NETWORK_DATA_FILE" ]; then
|
||||
prev_data=$(cat "$NETWORK_DATA_FILE")
|
||||
prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
[ -z "$prev_time" ] && prev_time=0
|
||||
fi
|
||||
|
||||
# 创建临时存储当前数据的文件
|
||||
temp_current_data="/tmp/system_network_current_$(id -u).json"
|
||||
echo "{\"time\": $current_time," > "$temp_current_data"
|
||||
|
||||
# 计算时间间隔(秒)
|
||||
time_diff=1
|
||||
if [ $prev_time -ne 0 ]; then
|
||||
time_diff=$((current_time - prev_time))
|
||||
[ $time_diff -le 0 ] && time_diff=1 # 防止除以零
|
||||
fi
|
||||
|
||||
# 收集所有网络接口的信息
|
||||
for iface in $(ls /sys/class/net/ | grep -v "lo"); do
|
||||
if [ "$first" = true ]; then
|
||||
first=false
|
||||
echo "\"interfaces\": {" >> "$temp_current_data"
|
||||
else
|
||||
result+=","
|
||||
echo "," >> "$temp_current_data"
|
||||
fi
|
||||
|
||||
# 读取当前网络接口统计
|
||||
rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
|
||||
tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
|
||||
rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
|
||||
tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)
|
||||
|
||||
# 保存当前数据到临时文件
|
||||
echo "\"$iface\": {\"rx_bytes\": $rx_bytes, \"tx_bytes\": $tx_bytes, \"rx_packets\": $rx_packets, \"tx_packets\": $tx_packets}" >> "$temp_current_data"
|
||||
|
||||
# 计算速率(如果有之前的数据)
|
||||
down_speed=0
|
||||
up_speed=0
|
||||
|
||||
if [ -n "$prev_data" ]; then
|
||||
# 提取之前的数据
|
||||
prev_rx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
|
||||
prev_tx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')
|
||||
|
||||
# 如果找到了之前的数据,计算速率
|
||||
if [ -n "$prev_rx_bytes" ] && [ -n "$prev_tx_bytes" ]; then
|
||||
# 计算差值
|
||||
rx_diff=$((rx_bytes - prev_rx_bytes))
|
||||
tx_diff=$((tx_bytes - prev_tx_bytes))
|
||||
|
||||
# 确保值不是负数(可能由于系统重启计数器重置)
|
||||
[ $rx_diff -lt 0 ] && rx_diff=0
|
||||
[ $tx_diff -lt 0 ] && tx_diff=0
|
||||
|
||||
# 安全地计算速率
|
||||
down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff / 1024}")
|
||||
up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff / 1024}")
|
||||
fi
|
||||
fi
|
||||
|
||||
# 添加接口信息到结果
|
||||
result+=$(cat << EOF
|
||||
"$iface": {
|
||||
"down": $down_speed,
|
||||
"up": $up_speed,
|
||||
"downTotal": $rx_bytes,
|
||||
"upTotal": $tx_bytes,
|
||||
"downPackets": $rx_packets,
|
||||
"upPackets": $tx_packets
|
||||
}
|
||||
EOF
|
||||
)
|
||||
done
|
||||
|
||||
# 完成当前数据文件
|
||||
if [ "$first" = false ]; then
|
||||
echo "}" >> "$temp_current_data"
|
||||
else
|
||||
echo "\"interfaces\": {}" >> "$temp_current_data"
|
||||
fi
|
||||
echo "}" >> "$temp_current_data"
|
||||
|
||||
# 移动临时文件到持久文件位置
|
||||
mv "$temp_current_data" "$NETWORK_DATA_FILE"
|
||||
|
||||
result+="}"
|
||||
echo "$result"
|
||||
}
|
||||
|
||||
# 收集总体网络统计
|
||||
collect_total_network() {
|
||||
current_time=$(date +%s)
|
||||
|
||||
# 初始化计数器
|
||||
down_total=0
|
||||
up_total=0
|
||||
down_packets=0
|
||||
up_packets=0
|
||||
down_speed=0
|
||||
up_speed=0
|
||||
|
||||
# 读取之前的数据(如果存在)
|
||||
prev_data=""
|
||||
prev_time=0
|
||||
|
||||
if [ -f "$NETWORK_DATA_FILE" ]; then
|
||||
prev_data=$(cat "$NETWORK_DATA_FILE")
|
||||
prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
[ -z "$prev_time" ] && prev_time=0
|
||||
fi
|
||||
|
||||
# 计算时间间隔(秒)
|
||||
time_diff=1
|
||||
if [ $prev_time -ne 0 ]; then
|
||||
time_diff=$((current_time - prev_time))
|
||||
[ $time_diff -le 0 ] && time_diff=1 # 防止除以零
|
||||
fi
|
||||
|
||||
# 收集当前总流量
|
||||
for iface in $(ls /sys/class/net/ | grep -v "lo"); do
|
||||
# 读取当前网络接口统计
|
||||
rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
|
||||
tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
|
||||
rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
|
||||
tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)
|
||||
|
||||
# 累加当前总量
|
||||
down_total=$((down_total + rx_bytes))
|
||||
up_total=$((up_total + tx_bytes))
|
||||
down_packets=$((down_packets + rx_packets))
|
||||
up_packets=$((up_packets + tx_packets))
|
||||
done
|
||||
|
||||
# 收集之前的总流量
|
||||
if [ -f "$NETWORK_DATA_FILE" ]; then
|
||||
for iface in $(ls /sys/class/net/ | grep -v "lo"); do
|
||||
# 提取之前的数据
|
||||
iface_prev_rx=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
|
||||
iface_prev_tx=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')
|
||||
|
||||
# 累加总流量
|
||||
if [ -n "$iface_prev_rx" ]; then
|
||||
prev_down_total=$((prev_down_total + iface_prev_rx))
|
||||
fi
|
||||
if [ -n "$iface_prev_tx" ]; then
|
||||
prev_up_total=$((prev_up_total + iface_prev_tx))
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# 计算总体速率
|
||||
if [ $prev_time -ne 0 ]; then
|
||||
rx_diff=$((down_total - prev_down_total))
|
||||
tx_diff=$((up_total - prev_up_total))
|
||||
|
||||
# 确保值不是负数
|
||||
[ $rx_diff -lt 0 ] && rx_diff=0
|
||||
[ $tx_diff -lt 0 ] && tx_diff=0
|
||||
|
||||
down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff/ 1024}")
|
||||
up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff/ 1024}")
|
||||
fi
|
||||
|
||||
# 返回结果
|
||||
cat << EOF
|
||||
{
|
||||
"down": $down_speed,
|
||||
"up": $up_speed,
|
||||
"downPackets": $down_packets,
|
||||
"upPackets": $up_packets,
|
||||
"downTotal": $down_total,
|
||||
"upTotal": $up_total
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
# 收集CPU信息
|
||||
collect_cpu() {
|
||||
# 定义临时文件路径(使用 mktemp 提高安全性)
|
||||
CPU_DATA_FILE="/tmp/system_cpu_data_$(id -u).json"
|
||||
TEMP_CURRENT_DATA=$(mktemp "/tmp/system_cpu_current_XXXXXXX.json")
|
||||
|
||||
# 初始化返回值
|
||||
local current_time
|
||||
current_time=$(date +%s)
|
||||
|
||||
# 读取当前CPU统计信息
|
||||
local current_cpu_stat
|
||||
if ! current_cpu_stat=$(cat /proc/stat | grep '^cpu ' | awk '{
|
||||
user_nice_system = ($2 + $3 + $4) + 0
|
||||
idle = $5 + 0
|
||||
total = (user_nice_system + idle + ($6 + 0) + ($7 + 0) + ($8 + 0))
|
||||
printf "%d,%d,%d", user_nice_system, idle, total
|
||||
}'); then
|
||||
echo "Unable to read CPU statistics information" >&2
|
||||
return 1
|
||||
fi
|
||||
local current_user_time=$(echo "$current_cpu_stat" | cut -d',' -f1)
|
||||
local current_idle_time=$(echo "$current_cpu_stat" | cut -d',' -f2)
|
||||
local current_total_time=$(echo "$current_cpu_stat" | cut -d',' -f3)
|
||||
|
||||
# 收集各核心当前统计信息
|
||||
local core_stats=()
|
||||
while read -r line; do
|
||||
if [[ $line =~ ^cpu[0-9]+ ]]; then
|
||||
local core_stat=$(echo "$line" | awk '{printf "%d,%d,%d", $2+$3+$4+$6+$7+$8, $5, $2+$3+$4+$5+$6+$7+$8}')
|
||||
core_stats+=("$core_stat")
|
||||
fi
|
||||
done < /proc/stat
|
||||
|
||||
# 读取之前的数据(如果存在)
|
||||
local prev_data=""
|
||||
local prev_time=0
|
||||
local prev_user_time=0
|
||||
local prev_idle_time=0
|
||||
local prev_total_time=0
|
||||
local prev_core_stats=()
|
||||
|
||||
if [[ -f "$CPU_DATA_FILE" ]]; then
|
||||
if ! prev_data=$(cat "$CPU_DATA_FILE"); then
|
||||
echo "Unable to read historical CPU data" >&2
|
||||
return 1
|
||||
fi
|
||||
|
||||
prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
prev_user_time=$(echo "$prev_data" | grep -o '"user_time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
prev_idle_time=$(echo "$prev_data" | grep -o '"idle_time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
prev_total_time=$(echo "$prev_data" | grep -o '"total_time": [0-9]*' | head -1 | awk '{print $2}')
|
||||
|
||||
# 使用 awk 跨行匹配核心数据
|
||||
local i=0
|
||||
while true; do
|
||||
local core_data
|
||||
core_data=$(echo "$prev_data" | awk -v core="core_$i" '
|
||||
$0 ~ "\"" core "\": {" {flag=1; print; next}
|
||||
flag && /}/ {print; flag=0; exit}
|
||||
flag {print}
|
||||
')
|
||||
|
||||
if [[ -z "$core_data" ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
local core_user_time=$(echo "$core_data" | grep -o '"user_time": [0-9]*' | awk '{print $2}')
|
||||
local core_idle_time=$(echo "$core_data" | grep -o '"idle_time": [0-9]*' | awk '{print $2}')
|
||||
local core_total_time=$(echo "$core_data" | grep -o '"total_time": [0-9]*' | awk '{print $2}')
|
||||
|
||||
prev_core_stats+=("$core_user_time,$core_idle_time,$core_total_time")
|
||||
((i++))
|
||||
done
|
||||
fi
|
||||
|
||||
# 计算时间间隔(秒)
|
||||
local time_diff=$((current_time - prev_time))
|
||||
((time_diff <= 0)) && time_diff=1 # 防止除以零
|
||||
|
||||
# 计算总CPU使用率
|
||||
local cpu_usage=0
|
||||
if ((prev_total_time > 0)); then
|
||||
local user_diff=$((current_user_time - prev_user_time))
|
||||
local total_diff=$((current_total_time - prev_total_time))
|
||||
|
||||
# 防止负值(可能由于系统重启导致计数器重置)
|
||||
((user_diff < 0)) && user_diff=0
|
||||
((total_diff < 0)) && total_diff=0
|
||||
|
||||
if ((total_diff > 0)); then
|
||||
cpu_usage=$(awk "BEGIN {printf \"%.2f\", ($user_diff / $total_diff) * 100}")
|
||||
fi
|
||||
fi
|
||||
|
||||
# 获取逻辑核心数
|
||||
local logical_cores
|
||||
logical_cores=$(nproc 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null || echo 1)
|
||||
|
||||
# 计算每个核心的使用率
|
||||
local cpu_cores_usage="["
|
||||
local first=true
|
||||
local i=0
|
||||
|
||||
for core_stat in "${core_stats[@]}"; do
|
||||
local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
|
||||
local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
|
||||
local core_total_time=$(echo "$core_stat" | cut -d',' -f3)
|
||||
|
||||
local core_usage=0
|
||||
if ((i < ${#prev_core_stats[@]})); then
|
||||
local prev_core_stat=${prev_core_stats[$i]}
|
||||
local prev_core_user_time=$(echo "$prev_core_stat" | cut -d',' -f1)
|
||||
local prev_core_idle_time=$(echo "$prev_core_stat" | cut -d',' -f2)
|
||||
local prev_core_total_time=$(echo "$prev_core_stat" | cut -d',' -f3)
|
||||
|
||||
local core_user_diff=$((core_user_time - prev_core_user_time))
|
||||
local core_total_diff=$((core_total_time - prev_core_total_time))
|
||||
|
||||
# 防止负值
|
||||
((core_user_diff < 0)) && core_user_diff=0
|
||||
((core_total_diff < 0)) && core_total_diff=0
|
||||
|
||||
if ((core_total_diff > 0)); then
|
||||
core_usage=$(awk "BEGIN {printf \"%.2f\", ($core_user_diff / $core_total_diff) * 100}")
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$first" == true ]]; then
|
||||
first=false
|
||||
else
|
||||
cpu_cores_usage+=","
|
||||
fi
|
||||
|
||||
cpu_cores_usage+="$core_usage"
|
||||
((i++))
|
||||
done
|
||||
|
||||
cpu_cores_usage+="]"
|
||||
|
||||
# 获取CPU名称(优先使用lscpu)
|
||||
local cpu_name
|
||||
if command -v lscpu >/dev/null 2>&1; then
|
||||
cpu_name=$(lscpu | grep "Model name" | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
|
||||
else
|
||||
cpu_name=$(grep "model name" /proc/cpuinfo | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
|
||||
fi
|
||||
|
||||
# 获取核心数(优先使用lscpu)
|
||||
local physical_cores=1
|
||||
local physical_cpus=1
|
||||
if command -v lscpu >/dev/null 2>&1; then
|
||||
physical_cores=$(lscpu | grep "Core(s) per socket" | awk '{print $4}')
|
||||
physical_cpus=$(lscpu | grep "Socket(s)" | awk '{print $2}')
|
||||
else
|
||||
# 备用方法:解析/proc/cpuinfo
|
||||
physical_cpus=$(grep "physical id" /proc/cpuinfo | sort -u | wc -l)
|
||||
physical_cores=$(grep "cpu cores" /proc/cpuinfo | head -1 | awk '{print $4}')
|
||||
|
||||
# 如果无法获取核心数,计算保守估算
|
||||
if [[ -z "$physical_cores" ]]; then
|
||||
physical_cores=$(( logical_cores / physical_cpus ))
|
||||
fi
|
||||
fi
|
||||
|
||||
# 确保变量有值
|
||||
[[ -z "$physical_cores" ]] && physical_cores=1
|
||||
[[ -z "$physical_cpus" ]] && physical_cpus=1
|
||||
|
||||
# 保存当前CPU统计信息到临时文件用于下次比较
|
||||
{
|
||||
echo "{"
|
||||
echo " \"time\": $current_time,"
|
||||
echo " \"user_time\": $current_user_time,"
|
||||
echo " \"idle_time\": $current_idle_time,"
|
||||
echo " \"total_time\": $current_total_time,"
|
||||
|
||||
# 保存每个核心的统计信息
|
||||
local i=0
|
||||
for core_stat in "${core_stats[@]}"; do
|
||||
local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
|
||||
local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
|
||||
local core_total_time=$(echo "$core_stat" | cut -d',' -f3)
|
||||
|
||||
echo " \"core_$i\": {"
|
||||
echo " \"user_time\": $core_user_time,"
|
||||
echo " \"idle_time\": $core_idle_time,"
|
||||
echo " \"total_time\": $core_total_time"
|
||||
|
||||
if ((i < ${#core_stats[@]} - 1)); then
|
||||
echo " },"
|
||||
else
|
||||
echo " }"
|
||||
fi
|
||||
|
||||
((i++))
|
||||
done
|
||||
|
||||
echo "}"
|
||||
} > "$TEMP_CURRENT_DATA"
|
||||
|
||||
# 原子性替换文件
|
||||
if ! mv "$TEMP_CURRENT_DATA" "$CPU_DATA_FILE"; then
|
||||
echo "Unable to save CPU data to $CPU_DATA_FILE" >&2
|
||||
rm -f "$TEMP_CURRENT_DATA"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# 返回格式化的结果
|
||||
echo "[$cpu_usage, $logical_cores, $cpu_cores_usage, \"$cpu_name\", $physical_cores, $physical_cpus]"
|
||||
}
|
||||
|
||||
# 收集CPU时间
|
||||
collect_cpu_times() {
|
||||
# 获取CPU时间
|
||||
cpu_line=$(cat /proc/stat | grep '^cpu ' | awk '{print $2,$3,$4,$5,$6,$7,$8,$9,$10,$11}')
|
||||
read -r user nice system idle iowait irq softirq steal guest guest_nice <<< "$cpu_line"
|
||||
|
||||
# 获取进程信息
|
||||
total_processes=$(ps -e | wc -l)
|
||||
active_processes=$(ps -eo stat | grep -c "R")
|
||||
|
||||
cat << EOF
|
||||
{
|
||||
"user": $user,
|
||||
"nice": $nice,
|
||||
"system": $system,
|
||||
"idle": $idle,
|
||||
"iowait": $iowait,
|
||||
"irq": $irq,
|
||||
"softirq": $softirq,
|
||||
"steal": $steal,
|
||||
"guest": $guest,
|
||||
"guest_nice": $guest_nice,
|
||||
"Total number of processes": $total_processes,
|
||||
"Number of activity processes": $active_processes
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
# 收集磁盘信息
|
||||
collect_disk() {
|
||||
df_output=$(df -TPB1 -x tmpfs -x devtmpfs | tail -n +2 | grep -vE "/boot\$" | grep -vE "docker/overlay2")
|
||||
|
||||
result="["
|
||||
first=true
|
||||
|
||||
while read -r filesystem type total used avail pcent mountpoint; do
|
||||
if [ "$first" = true ]; then
|
||||
first=false
|
||||
else
|
||||
result+=","
|
||||
fi
|
||||
|
||||
size_bytes=$total
|
||||
size_used=$used
|
||||
size_avail=$avail
|
||||
|
||||
# 格式化为人类可读大小(使用单独的awk命令处理每个值)
|
||||
size_human=$(echo "$size_bytes" | awk '{
|
||||
suffix="BKMGT"; value=$1;
|
||||
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
|
||||
printf("%.2f%s", value, substr(suffix,i,1));
|
||||
}')
|
||||
|
||||
size_used_human=$(echo "$size_used" | awk '{
|
||||
suffix="BKMGT"; value=$1;
|
||||
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
|
||||
printf("%.2f%s", value, substr(suffix,i,1));
|
||||
}')
|
||||
|
||||
size_avail_human=$(echo "$size_avail" | awk '{
|
||||
suffix="BKMGT"; value=$1;
|
||||
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
|
||||
printf("%.2f%s", value, substr(suffix,i,1));
|
||||
}')
|
||||
|
||||
# 收集inode信息
|
||||
inode_info=$(df -i | grep -E "$mountpoint\$" | awk '{print $2,$3,$4,$5}')
|
||||
read -r itotal iused iavail ipcent <<< "$inode_info"
|
||||
|
||||
# 确保inode值不为空
|
||||
[ -z "$itotal" ] && itotal=0
|
||||
[ -z "$iused" ] && iused=0
|
||||
[ -z "$iavail" ] && iavail=0
|
||||
[ -z "$ipcent" ] && ipcent="0%"
|
||||
|
||||
result+=$(cat << EOF
|
||||
{
|
||||
"filesystem": "$filesystem",
|
||||
"types": "$type",
|
||||
"path": "$mountpoint",
|
||||
"rname": "$(basename "$mountpoint")",
|
||||
"byte_size": [$size_bytes, $size_used, $size_avail],
|
||||
"size": ["$size_human", "$size_used_human", "$size_avail_human"],
|
||||
"d_size": "$pcent",
|
||||
"inodes": [$itotal, $iused, $iavail, "$ipcent"]
|
||||
}
|
||||
EOF
|
||||
)
|
||||
done <<< "$df_output"
|
||||
|
||||
result+="]"
|
||||
echo "$result"
|
||||
}
|
||||
|
||||
# 收集IO统计
|
||||
collect_iostat() {
|
||||
result="{"
|
||||
first=true
|
||||
|
||||
disks=$(ls /sys/block/ 2>/dev/null | grep -E '^(sd|hd|vd|nvme)' 2>/dev/null || echo "")
|
||||
|
||||
for disk in $disks; do
|
||||
if [ -r "/sys/block/$disk/stat" ]; then
|
||||
if [ "$first" = true ]; then
|
||||
first=false
|
||||
else
|
||||
result+=","
|
||||
fi
|
||||
|
||||
# 读取磁盘统计信息
|
||||
disk_stats=$(cat /sys/block/$disk/stat 2>/dev/null)
|
||||
if [ -n "$disk_stats" ]; then
|
||||
# 使用默认值以防读取失败
|
||||
read_comp=0 read_merged=0 read_sectors=0 read_ms=0 write_comp=0 write_merged=0 write_sectors=0 write_ms=0 io_in_progress=0 io_ms_weighted=0
|
||||
|
||||
# 尝试读取值
|
||||
read read_comp read_merged read_sectors read_ms write_comp write_merged write_sectors write_ms io_in_progress io_ms_weighted <<< "$disk_stats"
|
||||
|
||||
# 转换扇区为字节 (512字节为一个扇区)
|
||||
read_bytes=$((read_sectors * 512))
|
||||
write_bytes=$((write_sectors * 512))
|
||||
|
||||
result+=$(cat << EOF
|
||||
"$disk": {
|
||||
"read_count": $read_comp,
|
||||
"read_merged_count": $read_merged,
|
||||
"read_bytes": $read_bytes,
|
||||
"read_time": $read_ms,
|
||||
"write_count": $write_comp,
|
||||
"write_merged_count": $write_merged,
|
||||
"write_bytes": $write_bytes,
|
||||
"write_time": $write_ms
|
||||
}
|
||||
EOF
|
||||
)
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
result+="}"
|
||||
echo "$result"
|
||||
}
|
||||
|
||||
# 收集负载信息
|
||||
collect_load() {
|
||||
load_avg=$(cat /proc/loadavg)
|
||||
read -r one five fifteen others <<< "$load_avg"
|
||||
|
||||
cpu_count=$(nproc)
|
||||
max_load=$((cpu_count * 2))
|
||||
|
||||
# 安全计算安全负载
|
||||
safe_load=$(awk "BEGIN {printf \"%.2f\", $max_load * 0.7}")
|
||||
|
||||
cat << EOF
|
||||
{
|
||||
"one": $one,
|
||||
"five": $five,
|
||||
"fifteen": $fifteen,
|
||||
"max": $max_load,
|
||||
"limit": $cpu_count,
|
||||
"safe": $safe_load
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
# 收集内存信息
|
||||
collect_mem() {
|
||||
mem_info=$(cat /proc/meminfo)
|
||||
|
||||
# 提取内存数据 (单位: KB)
|
||||
mem_total=$(awk '/^MemTotal/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_free=$(awk '/^MemFree/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_available=$(awk '/^MemAvailable/ {print $2; exit}' <<< "$mem_info" || echo "$mem_free")
|
||||
mem_buffers=$(awk '/^Buffers/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_cached=$(awk '/^Cached:/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_sreclaimable=$(awk '/^SReclaimable:/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_buffers=$(awk '/^Buffers:/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
mem_shared=$(awk '/^Shmem/ {print $2; exit}' <<< "$mem_info" || echo 0)
|
||||
|
||||
# 确保数值有效
|
||||
[ -z "$mem_total" ] && mem_total=0
|
||||
[ -z "$mem_free" ] && mem_free=0
|
||||
[ -z "$mem_available" ] && mem_available=0
|
||||
[ -z "$mem_buffers" ] && mem_buffers=0
|
||||
[ -z "$mem_cached" ] && mem_cached=0
|
||||
[ -z "$mem_shared" ] && mem_shared=0
|
||||
[ -z "$mem_sreclaimable" ] && mem_sreclaimable=0
|
||||
[ -z "$mem_buffers" ] && mem_buffers=0
|
||||
|
||||
# 安全计算实际使用的内存
|
||||
mem_real_used=$((mem_total - mem_free - mem_buffers - mem_cached - mem_sreclaimable - mem_buffers))
|
||||
[ $mem_real_used -lt 0 ] && mem_real_used=0
|
||||
|
||||
# 转换为人类可读格式(单独处理每个值)
|
||||
mem_new_total=$(awk -v bytes="$((mem_total * 1024))" 'BEGIN {
|
||||
suffix="BKMGT"; value=bytes;
|
||||
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
|
||||
printf("%.2f%s", value, substr(suffix,i,1));
|
||||
}')
|
||||
|
||||
mem_new_real_used=$(awk -v bytes="$((mem_real_used * 1024))" 'BEGIN {
|
||||
suffix="BKMGT"; value=bytes;
|
||||
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
|
||||
printf("%.2f%s", value, substr(suffix,i,1));
|
||||
}')
|
||||
|
||||
# 转为字节
|
||||
mem_total_bytes=$((mem_total * 1024))
|
||||
mem_free_bytes=$((mem_free * 1024))
|
||||
mem_available_bytes=$((mem_available * 1024))
|
||||
mem_buffers_bytes=$((mem_buffers * 1024))
|
||||
mem_cached_bytes=$((mem_cached * 1024 + mem_sreclaimable * 1024 + mem_buffers* 1024))
|
||||
mem_real_used_bytes=$((mem_real_used * 1024))
|
||||
mem_shared_bytes=$((mem_shared * 1024))
|
||||
|
||||
cat << EOF
|
||||
{
|
||||
"memTotal": $mem_total_bytes,
|
||||
"memFree": $mem_free_bytes,
|
||||
"memAvailable": $mem_available_bytes,
|
||||
"memBuffers": $mem_buffers_bytes,
|
||||
"memCached": $mem_cached_bytes,
|
||||
"memRealUsed": $mem_real_used_bytes,
|
||||
"memShared": $mem_shared_bytes,
|
||||
"memNewTotal": "$mem_new_total",
|
||||
"memNewRealUsed": "$mem_new_real_used"
|
||||
}
|
||||
EOF
|
||||
}
|
||||
|
||||
# Collect the physical memory capacity in bytes via dmidecode.
# Prints the DMI "Maximum Capacity" when it is readable, otherwise the sum of
# the installed module sizes; prints "0" and returns 1 when nothing works.
collect_physical_memory() {
    # Prefer sudo when available so dmidecode can read the DMI tables as non-root.
    if command -v sudo >/dev/null 2>&1; then
        SUDO_CMD="sudo"
    else
        SUDO_CMD=""
    fi

    # Best-effort install of dmidecode with whichever package manager exists.
    if ! command -v dmidecode >/dev/null 2>&1; then
        if command -v apt-get >/dev/null 2>&1; then
            $SUDO_CMD apt-get update >/dev/null 2>&1 && $SUDO_CMD apt-get install -y dmidecode >/dev/null 2>&1
        elif command -v yum >/dev/null 2>&1; then
            $SUDO_CMD yum install -y dmidecode >/dev/null 2>&1
        elif command -v dnf >/dev/null 2>&1; then
            $SUDO_CMD dnf install -y dmidecode >/dev/null 2>&1
        elif command -v zypper >/dev/null 2>&1; then
            $SUDO_CMD zypper install -y dmidecode >/dev/null 2>&1
        elif command -v pacman >/dev/null 2>&1; then
            $SUDO_CMD pacman -S --noconfirm dmidecode >/dev/null 2>&1
        fi
    fi

    # Re-check: the install may have failed (no network, unsupported distro).
    if command -v dmidecode >/dev/null 2>&1; then
        # First try the board-level "Maximum Capacity" field.
        max_capacity=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Maximum Capacity:" | head -n1 | awk '
        {
            value = $3
            unit = $4
            # Convert to bytes.
            if (unit == "GB" || unit == "gb") {
                bytes = value * 1024 * 1024 * 1024
            } else if (unit == "MB" || unit == "mb") {
                bytes = value * 1024 * 1024
            } else if (unit == "TB" || unit == "tb") {
                bytes = value * 1024 * 1024 * 1024 * 1024
            } else {
                bytes = 0
            }
            printf "%.0f", bytes
        }')

        if [ -n "$max_capacity" ] && [ "$max_capacity" -gt 0 ] 2>/dev/null; then
            echo "$max_capacity"
            return 0
        fi

        # Fallback: sum the sizes of the installed memory modules.
        total_memory=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Size:" | grep -i "[0-9]* GB\|[0-9]* MB" | awk '
        BEGIN { total = 0 }
        {
            # Reset per record: otherwise a line with an unrecognised unit
            # silently re-adds the previous module size (stale awk variable).
            bytes = 0
            value = $2
            unit = $3
            if (unit == "GB" || unit == "gb") {
                bytes = value * 1024 * 1024 * 1024
            } else if (unit == "MB" || unit == "mb") {
                bytes = value * 1024 * 1024
            }
            total += bytes
        }
        END {
            printf "%.0f", total
        }')

        if [ -n "$total_memory" ] && [ "$total_memory" -gt 0 ] 2>/dev/null; then
            echo "$total_memory"
            return 0
        fi
    fi

    # Every probe failed.
    echo "0"
    return 1
}
|
||||
|
||||
# Main function: gather every metric and emit a single JSON document on stdout.
# NOTE(review): the heredoc below is the machine-read output contract of this
# script — do not reformat or reorder its fields.
main() {
    # OS identification: full PRETTY_NAME plus a simplified "Id Version" form.
    os_name=$(cat /etc/os-release 2>/dev/null | grep "PRETTY_NAME" | cut -d "=" -f 2 | tr -d '"' || echo "Unknown")
    simple_system=$(awk -F= '
    /^ID=/ {id=$2}
    /^VERSION_ID=/ {gsub(/"/,"",$2); version=$2}
    END {
        gsub(/"/,"",id);
        print toupper(substr(id,1,1)) substr(id,2) " " version
    }' /etc/os-release 2>/dev/null || echo "Unknown")

    hostname=$(hostname)
    current_time=$(date "+%Y-%m-%d %H:%M:%S")
    version="1.0.0" # custom version string

    # Hard-coded site/database/ftp counts (real collection depends on the host environment).
    site_total=0
    database_total=0
    ftp_total=0
    installed=true

    # Aggregate network statistics; individual fields are scraped out of the
    # JSON text produced by collect_total_network with grep/cut.
    network_stats=$(collect_total_network)
    down=$(echo "$network_stats" | grep -o '"down": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
    up=$(echo "$network_stats" | grep -o '"up": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
    down_packets=$(echo "$network_stats" | grep -o '"downPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    up_packets=$(echo "$network_stats" | grep -o '"upPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    down_total=$(echo "$network_stats" | grep -o '"downTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    up_total=$(echo "$network_stats" | grep -o '"upTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    physical_memory=$(collect_physical_memory)

    # Emit the final JSON document.
    cat << EOF
{
"cpu": $(collect_cpu),
"cpu_times": $(collect_cpu_times),
"disk": $(collect_disk),
"iostat": $(collect_iostat),
"load": $(collect_load),
"mem": $(collect_mem),
"network": $(collect_network),
"system": "$os_name",
"simple_system": "$simple_system",
"title": "$hostname",
"time": "$current_time",
"version": "$version",
"site_total": $site_total,
"database_total": $database_total,
"ftp_total": $ftp_total,
"installed": $installed,
"down": $down,
"up": $up,
"downPackets": $down_packets,
"upPackets": $up_packets,
"downTotal": $down_total,
"upTotal": $up_total,
"physical_memory": $physical_memory
}
EOF
}
|
||||
|
||||
# Script entry point: run the main collector.
main
|
||||
199
mod/project/node/nodeutil/ssh_wrap.py
Normal file
199
mod/project/node/nodeutil/ssh_wrap.py
Normal file
@@ -0,0 +1,199 @@
|
||||
import json
|
||||
import os.path
|
||||
import traceback
|
||||
from typing import Optional, Tuple, Callable, Union, Dict
|
||||
from mod.base.ssh_executor import SSHExecutor, CommandResult
|
||||
from mod.project.node.dbutil import ServerNodeDB, Node
|
||||
|
||||
import public
|
||||
|
||||
def is_much_difference(a: float, b: float) -> bool:
    """Return True when *a* and *b* differ by at least a factor of ten.

    A zero on either side is always treated as a large difference.
    """
    if a == 0 or b == 0:
        return True
    quotient = a / b
    return not (0.1 < quotient < 10)
|
||||
|
||||
class SSHApi:
    """High-level SSH wrapper for managing a remote node.

    Lazily builds one SSHExecutor from the stored connection parameters and
    exposes file/script operations that return (value, error-message) pairs
    instead of raising to callers.
    """

    is_local = False
    # Helper shell scripts shipped next to this module.
    _local_scripts_dir = os.path.join(os.path.dirname(__file__), "ssh_warp_scripts")

    def __init__(self, host, port: int = 22, username: str = "root", password=None, pkey=None,
                 pkey_passwd=None, threading_mod=False, timeout=20):
        # Store raw connection parameters; the executor itself is created lazily.
        self._real_ssh_conf = {
            "host": host,
            "username": username,
            "port": port,
            "password": password,
            "key_file": "",
            "passphrase": pkey_passwd,
            "key_data": pkey,
            "strict_host_key_checking": False,
            "allow_agent": False,
            "look_for_keys": False,
            "threading_mod": threading_mod,
            "timeout": timeout,
        }
        self._ssh_executor: Optional[SSHExecutor] = None

    @classmethod
    def new_by_id(cls, node_id: int, threading_mod=False) -> Optional["SSHApi"]:
        """Build an SSHApi from a stored node record; None when unavailable."""
        data = ServerNodeDB().get_node_by_id(node_id)
        if not data or not isinstance(data, dict):
            return None
        data["ssh_conf"] = json.loads(data["ssh_conf"])
        if not data["ssh_conf"]:
            return None
        data["ssh_conf"]["threading_mod"] = threading_mod
        return cls(**data["ssh_conf"])

    def _get_ssh_executor(self) -> SSHExecutor:
        """Create the executor on first use and cache it on the instance."""
        if self._ssh_executor:
            return self._ssh_executor
        self._ssh_executor = SSHExecutor(**self._real_ssh_conf)
        return self._ssh_executor

    def get_net_work(self) -> Tuple[Optional[dict], str]:
        """Run system_info.sh on the remote host and parse its JSON output.

        Returns (data, "") on success or (None, error-message) on failure.
        """
        data, err = self._run_script("system_info.sh")
        if err:
            return None, err
        if not data.exit_code == 0:
            return None, data.stderr
        try:
            data = json.loads(data.stdout)
            if isinstance(data, dict) and "cpu" in data and "mem" in data:
                return self._tans_net_work_form_data(data), ""
            return None, "data in wrong format: %s" % str(data)
        except Exception as e:
            return None, str(e)

    @staticmethod
    def _tans_net_work_form_data(data: dict):
        """Convert memory fields from bytes to MB and choose a display total."""
        data["mem"]["memAvailable"] = round(data["mem"]["memAvailable"] / 1024 / 1024, 2)
        data["mem"]["memBuffers"] = round(data["mem"]["memBuffers"] / 1024 / 1024, 2)
        data["mem"]["memCached"] = round(data["mem"]["memCached"] / 1024 / 1024, 2)
        data["mem"]["memFree"] = round(data["mem"]["memFree"] / 1024 / 1024, 2)
        data["mem"]["memRealUsed"] = round(data["mem"]["memRealUsed"] / 1024 / 1024, 2)
        data["mem"]["memShared"] = round(data["mem"]["memShared"] / 1024 / 1024, 2)
        data["mem"]["memTotal"] = round(data["mem"]["memTotal"] / 1024 / 1024, 2)
        data["physical_memory"] = round(data["physical_memory"] / 1024 / 1024, 2)
        # When the DMI physical total wildly disagrees with /proc/meminfo
        # (e.g. dmidecode returned 0), fall back to the meminfo total.
        if is_much_difference(data["mem"]["memTotal"], data["physical_memory"]):
            if data["mem"]["memTotal"] >= 1024:
                data["mem"]["memNewTotal"] = "%.2fGB" % (data["mem"]["memTotal"] / 1024)
            else:
                data["mem"]["memNewTotal"] = "%.2fMB" % data["mem"]["memTotal"]
        else:
            if data["physical_memory"] >= 1024:
                data["mem"]["memNewTotal"] = "%.2fGB" % (data["physical_memory"] / 1024)
            else:
                data["mem"]["memNewTotal"] = "%.2fMB" % data["physical_memory"]
        return data

    def _run_script(self, script_name: str) -> Tuple[Optional[CommandResult], str]:
        """Upload-and-run a local helper script; returns (result, error)."""
        local_file = os.path.join(self._local_scripts_dir, script_name)
        if not os.path.exists(local_file):
            return None, "Script does not exist"
        executor = None
        try:
            executor = self._get_ssh_executor()
            executor.open()
            result = executor.execute_local_script_collect(local_file)
            return result, ""
        except RuntimeError:
            return None, "SSH connection failed"
        except Exception as e:
            return None, str(e)
        finally:
            # Always release the connection, even on failure.
            if executor:
                executor.close()

    def target_file_exits(self, target_file: str) -> Tuple[bool, str]:
        """Check whether *target_file* exists on the remote host."""
        try:
            executor = self._get_ssh_executor()
            executor.open()
            result, err = executor.path_exists(target_file)
            return result, err
        except RuntimeError:
            print(traceback.format_exc(), flush=True)
            return False, "SSH connection failed"
        except Exception as e:
            print(traceback.format_exc(), flush=True)
            return False, str(e)

    def create_dir(self, path: str) -> Tuple[bool, str]:
        """Create directory *path* on the remote host."""
        try:
            executor = self._get_ssh_executor()
            executor.open()
            result, err = executor.create_dir(path)
            return result, err
        except RuntimeError:
            # flush for consistency with the other methods' error reporting
            print(traceback.format_exc(), flush=True)
            return False, "SSH connection failed"
        except Exception as e:
            return False, str(e)

    def upload_file(self, filename: str, target_path: str, mode: str = "cover",
                    call_log: Callable[[int, str], None] = None) -> str:
        """Upload a local file to *target_path* on the remote host.

        mode: "cover" overwrites, "ignore" skips an existing target,
        "rename" uploads under a md5-suffixed name. call_log, when given,
        receives (progress_percent, message) updates.
        Returns "" on success or an error message.
        """
        if call_log is None:
            # Fix: the default None would otherwise crash on the first
            # progress/skip report below.
            def call_log(_progress, _message):
                pass

        if not os.path.isfile(filename):
            return "File: {} does not exist".format(filename)

        target_file = os.path.join(target_path, os.path.basename(filename))
        path_info = self.path_info(target_file)
        if isinstance(path_info, str):
            return path_info

        if path_info['exists'] and mode == "ignore":
            call_log(0, "File upload:{} -> {},The target file already exists, skip uploading".format(filename, target_file))
            return ""
        if path_info['exists'] and mode == "rename":
            upload_name = "{}_{}".format(os.path.basename(filename), public.md5(filename))
            call_log(0, "File upload:{} -> {},The target file already exists, it will be renamed to {}".format(filename, target_file, upload_name))
        else:
            upload_name = os.path.basename(filename)

        try:
            executor = self._get_ssh_executor()
            executor.open()

            def progress_callback(current_size: int, total_size: int):
                if total_size == 0:
                    return
                call_log(current_size * 100 // total_size, "")

            executor.upload(filename, os.path.join(target_path, upload_name), progress_callback=progress_callback)
        except RuntimeError:
            print(traceback.format_exc(), flush=True)
            return "SSH connection failed"
        except Exception as e:
            print(traceback.format_exc(), flush=True)
            return str(e)
        return ""

    def upload_dir_check(self, target_file: str) -> str:
        """Validate that *target_file* is usable as an upload directory.

        Returns "" when the path is absent or a directory, otherwise an error.
        """
        try:
            executor = self._get_ssh_executor()
            executor.open()
            path_info = executor.path_info(target_file)
            if not path_info['exists']:
                return ""
            # Fix: the original branch was inverted — it reported
            # "not a directory" exactly when the path WAS a directory.
            if not path_info['is_dir']:
                return "The name path is not a directory"
            return ""
        except RuntimeError:
            print(traceback.format_exc(), flush=True)
            return "SSH connection failed"
        except Exception as e:
            print(traceback.format_exc(), flush=True)
            return str(e)

    def path_info(self, path: str) -> Union[str, Dict]:
        """Return the executor's path-info dict, or an error string on failure."""
        try:
            executor = self._get_ssh_executor()
            executor.open()
            path_info = executor.path_info(path)
            return path_info
        except RuntimeError as e:
            print(traceback.format_exc(), flush=True)
            return "SSH connection failed: {}".format(str(e))
        except Exception as e:
            print(traceback.format_exc(), flush=True)
            return "Failed to obtain path information:{}".format(str(e))
|
||||
3
mod/project/node/task_flow/__init__.py
Normal file
3
mod/project/node/task_flow/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .file_task import self_file_running_log, file_task_run_sync
|
||||
from .command_task import command_task_run_sync
|
||||
from .flow import flow_running_log, flow_useful_version
|
||||
195
mod/project/node/task_flow/command_task.py
Normal file
195
mod/project/node/task_flow/command_task.py
Normal file
@@ -0,0 +1,195 @@
|
||||
import json
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Dict, Callable, Any, Union
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, CommandTask, CommandLog, TaskFlowsDB
|
||||
|
||||
|
||||
class CMDTask(object):
    """Run one command task against many nodes concurrently over SSH.

    One worker thread per node executes the script; every status change is
    pushed onto ``end_queue`` and a single collector thread (``end_func``)
    batches the updates into the database and forwards a status snapshot to
    ``call_update``. Log status codes observed in this class: 1 running,
    2 completed, 3 failed (missing data / exception), 4 non-zero exit code.
    """

    def __init__(self, task: Union[int, CommandTask], log_id: int, call_update: Callable[[Any], None], exclude_nodes: List[int] = None):
        self._edb = TaskFlowsDB()
        # Accept either a task id or an already-loaded CommandTask record.
        if isinstance(task, int):
            self.task = self._edb.CommandTask.find("id = ?", (task,))
        elif isinstance(task, CommandTask):
            self.task = task
        else:
            raise ValueError("Task parameter error")
        if not self.task:
            raise RuntimeError("The specified task does not exist")
        # log_id == 0 means "run every per-node log of the task";
        # otherwise only the single specified log entry is (re-)run.
        if log_id == 0:
            self.task.elogs = self._edb.CommandLog.query("command_task_id = ? ", (self.task.id,))
        else:
            self.task.elogs = [self._edb.CommandLog.find("command_task_id = ? AND id = ?", (self.task.id, log_id))]
        if not self.task.elogs:
            raise RuntimeError("Task has no execution entry")

        # Drop logs for nodes the caller asked to exclude.
        self._exclude_nodes = exclude_nodes or []
        self.task.elogs = [x for x in self.task.elogs if x.server_id not in self._exclude_nodes]

        # Mark the task as running before the workers start.
        self.task.status = 1
        self._edb.CommandTask.update(self.task)
        self.end_queue = queue.Queue()
        self.end_status = False
        self.status: List[Dict] = []
        self.call_update = call_update
        # Snapshot pushed to call_update; mutated in place by end_func.
        self.status_dict: Dict[str, Union[List[Any], int]] = {
            "task_id": self.task.id,
            "task_type": "command",
            "flow_idx": self.task.step_index -1,
            "count": len(self.task.elogs),
            "complete": 0,
            "error": 0,
            "exclude_nodes": self._exclude_nodes,
            "error_nodes": [],
            "data": [],
        }

    def end_func(self):
        """Collector loop: drain end_queue, batch DB writes, push snapshots.

        Runs in its own thread until ``end_status`` is set and the queue is
        drained; flushes accumulated log updates at most every 0.5 s.
        """
        edb = TaskFlowsDB()
        tmp_dict: Dict[int, CommandLog] = {}
        last_time = time.time()
        update_fields=("status",)
        # Sets deduplicate repeated status events for the same log id.
        complete_set, error_set = set(), set()
        while True:
            try:
                elog: CommandLog = self.end_queue.get(timeout=0.1)
            except queue.Empty:
                # Only exit once the workers have signalled completion.
                if self.end_status:
                    break
                else:
                    continue
            except Exception as e:
                print(e)
                break

            if elog.status in (3, 4):
                error_set.add(elog.id)
                self.status_dict["error_nodes"].append(int(elog.server_id))
                self.status_dict["error"] = len(error_set)
            elif elog.status == 2:
                complete_set.add(elog.id)
                self.status_dict["complete"] = len(complete_set)

            tmp_dict[elog.id] = elog
            # Periodic flush: write batched statuses and notify the caller.
            # NOTE(review): last_time is never advanced here, so after the
            # first 0.5 s every event flushes individually — confirm intent.
            if time.time() - last_time > 0.5:
                edb.CommandLog.bath_update(tmp_dict.values(), update_fields=update_fields)
                self.status_dict["data"] = [ l.to_show_data() for l in tmp_dict.values()]
                self.call_update(self.status_dict)
                tmp_dict.clear()

        # Final flush for anything still pending when the loop ends.
        if tmp_dict:
            edb.CommandLog.bath_update(tmp_dict.values(), update_fields=update_fields)
            self.status_dict["data"] = [ l.to_show_data() for l in tmp_dict.values()]
            self.call_update(self.status_dict)

        return

    def start(self):
        """Spawn one worker thread per pending log, wait, and finalise status."""
        thread_list = []
        s_db = ServerNodeDB()
        end_th = threading.Thread(target=self.end_func)
        end_th.start()

        for (idx, log) in enumerate(self.task.elogs):
            log.log_idx = idx
            if log.status == 2:  # skip entries that already completed
                self.end_queue.put(log)
                continue

            log.status = 1
            ssh_conf = None
            node = s_db.get_node_by_id(log.server_id)
            if not node:
                log.status = 3
                log.write_log("Node data loss, unable to execute\n")

            else:
                ssh_conf = json.loads(node["ssh_conf"])
                if not ssh_conf:
                    log.status = 3
                    log.write_log("Node SSH configuration data lost, unable to execute\n")

            # Report the (running or failed) state before spawning the worker.
            self.end_queue.put(log)

            if not ssh_conf:
                continue

            thread = threading.Thread(target=self.run_one, args=(ssh_conf, log))
            thread.start()
            thread_list.append(thread)

        for i in thread_list:
            i.join()
        # All workers done: let the collector drain and stop, then persist
        # the overall task status (3 when any node failed, else 2).
        self.end_status = True
        end_th.join()
        if self.status_dict["error"] > 0:
            self.task.status = 3
        else:
            self.task.status = 2
        self._edb.CommandTask.update(self.task)
        self._edb.close()

    def run_one(self, ssh_conf: dict, elog: CommandLog):
        """Worker body: run the task script on one node, streaming its output.

        All outcomes are reported through ``elog`` onto ``end_queue``.
        NOTE(review): the SSHExecutor is never explicitly closed here —
        confirm the executor releases the connection itself.
        """
        ssh = SSHExecutor(
            host=ssh_conf["host"],
            port=ssh_conf["port"],
            username=ssh_conf["username"],
            password=ssh_conf["password"],
            key_data=ssh_conf["pkey"],
            passphrase=ssh_conf["pkey_passwd"])
        elog.write_log("Start executing the task\nStart establishing SSH connection...\n")
        try:
            ssh.open()

            def on_stdout(data):
                # stdout and stderr are merged into the same log stream.
                if isinstance(data, bytes):
                    data = data.decode()
                elog.write_log(data)

            elog.write_log("Start executing script...\n\n")
            t = time.time()
            res_code = ssh.execute_script_streaming(
                script_content=self.task.script_content,
                script_type=self.task.script_type,
                timeout=60*60,
                on_stdout=on_stdout,
                on_stderr=on_stdout
            )
            take_time = round((time.time() - t)* 1000, 2)
            elog.write_log("\n\nExecution completed, time-consuming [{}ms]\n".format(take_time))
            if res_code == 0:
                elog.status = 2
                elog.write_log("Mission accomplished\n", is_end_log=True)
            else:
                elog.status = 4
                elog.write_log("Task exception, return status code is:{}\n".format(res_code), is_end_log=True)
            self.end_queue.put(elog)
        except Exception as e:
            traceback.print_exc()
            elog.status = 3
            elog.write_log("\nTask failed, error:" + str(e), is_end_log=True)
            self.end_queue.put(elog)
            return
|
||||
|
||||
|
||||
# 同步执行命令相关任务的重试
|
||||
def command_task_run_sync(task_id: int, log_id: int) -> Union[str, Dict[str, Any]]:
    """Synchronously retry one failed subtask of a command task.

    Returns the resulting status dict on success, or an error string when
    the task/subtask is missing, not in a failed state, or mismatched.
    """
    flow_db = TaskFlowsDB()

    # Guard clauses: validate the task, then the subtask, before running.
    task = flow_db.CommandTask.get_byid(task_id)
    if not task:
        return "Task does not exist"

    sub_log = flow_db.CommandLog.get_byid(log_id)
    if not sub_log:
        return "Subtask does not exist"
    if sub_log.status not in (3, 4):
        return "Subtask status is not failed, unable to retry"
    if sub_log.command_task_id != task_id:
        return "The subtask does not belong to this task and cannot be retried"

    runner = CMDTask(task, log_id=log_id, call_update=print)
    runner.start()
    return runner.status_dict
|
||||
|
||||
484
mod/project/node/task_flow/file_task.py
Normal file
484
mod/project/node/task_flow/file_task.py
Normal file
@@ -0,0 +1,484 @@
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
import itertools
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Callable, Any, Tuple, Union, Optional
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, TaskFlowsDB, TransferTask, TransferFile, TransferLog
|
||||
from mod.project.node.nodeutil import ServerNode, LPanelNode, SSHApi
|
||||
from mod.project.node.filetransfer.socket_server import StatusServer, StatusClient, register_cleanup
|
||||
|
||||
|
||||
# Directory for the unix sockets used to report task progress.
_SOCKET_FILE_DIR = "/tmp/flow_task"
# makedirs with exist_ok avoids the check-then-create race between processes
# and also succeeds when intermediate directories are missing.
os.makedirs(_SOCKET_FILE_DIR, exist_ok=True)
|
||||
|
||||
def _dir_walk(path: str) -> Tuple[List[dict], str]:
    """Recursively list all files under *path*.

    Returns (entries, error). Each entry is a dict with keys
    ``path``, ``size`` (bytes) and ``is_dir``; directories that contain no
    files are listed first with ``is_dir=1`` and size 0 so they can still be
    created on the destination. ``error`` is non-empty only when *path* is
    not a directory.
    """
    if not os.path.isdir(path):
        return [], "{} Not a directory".format(path)
    res_file = []
    empty_dir = []
    for root, dirs, files in os.walk(path):
        if not files:
            empty_dir.append(root)
        for f in files:
            full_path = os.path.join(root, f)
            try:
                res_file.append({
                    "path": full_path,
                    "size": os.path.getsize(full_path),
                    "is_dir": 0
                })
            except OSError:
                # File vanished (or became unreadable) between walk() and
                # stat(); skip it rather than abort the whole listing.
                pass
    return [{"path": d, "size": 0, "is_dir": 1} for d in empty_dir] + res_file, ""
|
||||
|
||||
|
||||
class FiletransferTask(object):
    """Transfer a task's file list from this host to many destination nodes.

    Files are expanded into TransferFile rows (once per task), crossed with
    the destination nodes into TransferLog rows, then pushed through a
    bounded worker pool. Workers report every state change onto
    ``event_queue``; a collector thread (``event_func``) batches database
    writes and forwards status snapshots to ``call_update``.
    Log status codes used here: 0 pending, 1 running, 2 done, 3 failed,
    4 skipped (target already exists).
    """

    def __init__(self, task: Union[int, TransferTask],
                 call_update: Callable[[Any], None],
                 exclude_nodes: List[int] = None,
                 the_log_id: int = None,
                 ):
        self._fdb = TaskFlowsDB()
        # Accept either a task id or an already-loaded TransferTask record.
        if isinstance(task, int):
            self.task = self._fdb.TransferTask.get_byid(task)
        elif isinstance(task, TransferTask):
            self.task = task
        else:
            raise ValueError("Parameter exception")

        if not self.task:
            raise RuntimeError("Task does not exist")

        self.exclude_nodes = exclude_nodes or []
        # the_log_id > 0 means "retry only this one TransferLog entry".
        self.the_log_id = max(the_log_id, 0) if isinstance(the_log_id, int) else 0
        self.event_queue = queue.Queue()
        self.trans_queue = queue.Queue()
        self.mut = threading.Lock()
        self._srv_cache: Dict[int, Union[SSHApi, LPanelNode, ServerNode]] = {}
        # Snapshot forwarded to call_update; mutated in place by event_func.
        self.status_dict: Dict[str, Any] = {
            "task_id": self.task.id,
            "task_type": "file",
            "flow_idx": self.task.step_index - 1,
            "count": 0,
            "complete": 0,
            "error": 0,
            "error_nodes": [],
            "exclude_nodes": self.exclude_nodes,
            "data": None,
        }
        self.is_trans_end = False
        self.call_update = call_update

    def _init_files(self):
        """Expand the task's path list into TransferFile rows (idempotent)."""
        has_file = self._fdb.TransferFile.find("flow_id = ? AND transfer_task_id = ?", (self.task.flow_id, self.task.id))
        # Already initialised on a previous run — nothing to do.
        if has_file:
            return

        file_list = []
        for src_item in self.task.path_list:
            dst_path = src_item["dst_path"].rstrip("/")
            src_item["path"] = src_item["path"].rstrip("/")
            if not os.path.exists(src_item["path"]):
                continue
            src_item["is_dir"] = os.path.isdir(src_item["path"])
            if src_item["is_dir"]:
                f_list, err = _dir_walk(src_item["path"])
                if not f_list:
                    # Fix: directory items never received a "size" key, which
                    # raised KeyError when building the TransferFile below.
                    src_item.setdefault("size", 0)
                    src_item["dst_file"] = os.path.join(dst_path, os.path.basename(src_item["path"]))
                    file_list.append(src_item)
                else:
                    # Re-root every walked path under the destination path.
                    for f_item in f_list:
                        f_item["dst_file"] = f_item["path"].replace(os.path.dirname(src_item["path"]), dst_path)
                    file_list.extend(f_list)
            else:
                if not os.path.isfile(src_item["path"]):
                    continue
                src_item["dst_file"] = os.path.join(dst_path, os.path.basename(src_item["path"]))
                src_item["size"] = os.path.getsize(src_item["path"])
                file_list.append(src_item)

        t_list = []
        for f_item in file_list:
            fl = TransferFile(
                flow_id=self.task.flow_id,
                transfer_task_id=self.task.id,
                src_file=f_item["path"],
                dst_file=f_item["dst_file"],
                file_size=f_item["size"],
                is_dir=f_item["is_dir"],
            )
            t_list.append(fl)
        try:
            self._fdb.TransferFile.create(t_list)
        except:
            print("Failed to initialize file list", traceback.format_exc())

    def _init_files_log(self):
        """Cross files with destination nodes into TransferLog rows (idempotent)."""
        tf_list = self._fdb.TransferFile.query("flow_id = ? AND transfer_task_id = ?", (self.task.flow_id, self.task.id))
        if not tf_list:
            return []
        # If the first file already has a log, assume the whole matrix exists.
        has_fl = self._fdb.TransferLog.query("transfer_task_id = ? AND transfer_file_id = ?", (self.task.id, tf_list[0].id))
        if has_fl:
            return self._fdb.TransferLog.query("transfer_task_id = ?", (self.task.id,))

        fl_list = []
        for (tf, dst_node_id) in itertools.product(tf_list, self.task.dst_nodes.keys()):
            fl = TransferLog(
                flow_id=self.task.flow_id,
                transfer_task_id=self.task.id,
                transfer_file_id=tf.id,
                dst_node_idx=int(dst_node_id),
                status=0,
                progress=0,
                message=""
            )
            fl_list.append(fl)

        try:
            self._fdb.TransferLog.create(fl_list)
        except:
            print("Failed to initialize file list", traceback.format_exc())

    def _get_srv(self, idx: int) -> Union[SSHApi, LPanelNode, ServerNode]:
        """Return (and cache) the client object for destination node *idx*.

        Double-checked locking keeps the cache safe across worker threads.
        NOTE(review): membership test uses int(idx) against dst_nodes —
        confirm dst_nodes is keyed by int, not str, in stored task data.
        """
        idx = int(idx)
        if idx in self._srv_cache:
            return self._srv_cache[idx]
        with self.mut:
            if idx in self._srv_cache:
                return self._srv_cache[idx]
            if idx not in self.task.dst_nodes:
                raise RuntimeError("Node index is out of range")
            srv_data: dict = self.task.dst_nodes[idx]
            if srv_data.get("lpver", None):
                srv = LPanelNode(srv_data["address"], srv_data["api_key"], srv_data["lpver"])
            elif srv_data["api_key"] or srv_data["app_key"]:
                srv = ServerNode(srv_data["address"], srv_data["api_key"], srv_data["app_key"])
            else:
                # Threading mode: separate SSH sessions on one shared connection.
                srv_data["ssh_conf"]["threading_mod"] = True
                srv = SSHApi(**srv_data["ssh_conf"])
            self._srv_cache[idx] = srv
            return srv

    def start(self):
        """Run (or retry) the transfer and persist the final task status."""
        self.task.status = 1
        self._fdb.TransferTask.update(self.task)
        self._init_files()
        self._init_files_log()
        if self.the_log_id > 0:  # retry one specific log entry
            query_where = "transfer_task_id = ? and id = ?"
            files_logs = self._fdb.TransferLog.query(query_where, (self.task.id, self.the_log_id))
        else:
            # Fetch the logs that are not yet done (2) or skipped (4),
            # excluding any nodes the caller asked to leave out.
            if self.exclude_nodes:
                query_where = "transfer_task_id = ? and status not in (2, 4) and dst_node_idx not in ({})".format(
                    ",".join(["?"] * len(self.exclude_nodes))
                )
            else:
                query_where = "transfer_task_id = ? and status not in (2, 4)"
            files_logs = self._fdb.TransferLog.query(query_where, (self.task.id, *self.exclude_nodes))
        files_list = self._fdb.TransferFile.query("transfer_task_id = ?", (self.task.id,))
        if not files_logs:
            # Fix: returning with status still 1 left the task "running"
            # forever; nothing pending means the task is complete.
            self.task.status = 2
            self._fdb.TransferTask.update(self.task)
            self._fdb.close()
            return
        files_map = {fl.id: fl for fl in files_list}
        for (idx, fl) in enumerate(files_logs):
            fl.log_idx = idx
            fl.tf = files_map[fl.transfer_file_id]
            self.trans_queue.put(fl)

        self.status_dict["count"] = len(files_logs)

        th_event = threading.Thread(target=self.event_func,)
        th_event.start()

        # Fix: the original submitted once_trans twice — a second batch of
        # executor.submit(self.once_trans) without the required worker_id
        # raised TypeError inside futures whose results were never checked.
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(self.once_trans, worker_id) for worker_id in range(8)]
            for future in as_completed(futures):
                print("Completed result:", future.result())

        self.is_trans_end = True
        th_event.join()
        if self.the_log_id > 0:
            # Fix: the success/failure assignment was inverted — zero
            # remaining pending/failed logs means the task completed.
            if self._fdb.TransferLog.count("transfer_task_id = ? and status in (0,1,3)", (self.task.id, )) == 0:
                self.task.status = 2
            else:
                self.task.status = 3
        else:
            self.task.status = 2 if self.status_dict["error"] == 0 else 3
        self._fdb.TransferTask.update(self.task)
        self._fdb.close()

    def once_trans(self, worker_id: int):
        """Worker loop: pop TransferLog entries and perform each transfer."""
        while True:
            try:
                tl = self.trans_queue.get(block=False)
            except queue.Empty:
                print("worker_id: %s, The queue is empty" % worker_id)
                break
            except Exception as e:
                print("worker_id: %s, Failed to obtain task" % worker_id)
                print(traceback.format_exc())
                break

            # Perform one file transfer; all outcomes go onto event_queue.
            try:
                if tl.status == 2:  # skip files that already completed
                    self.event_queue.put(tl)
                    continue
                srv = self._get_srv(tl.dst_node_idx)
                if tl.tf.is_dir:  # empty directory: just ensure it exists remotely
                    exits, err = srv.target_file_exits(tl.tf.dst_file)
                    if err:  # failed to stat the remote path
                        tl.message = err
                        tl.status = 3
                        self.event_queue.put(tl)
                    elif exits:  # target already exists -> skipped
                        tl.status = 4
                        tl.progress = 100
                        self.event_queue.put(tl)
                    else:  # target absent: create the directory
                        res, err = srv.create_dir(tl.tf.dst_file)
                        if err:
                            tl.message = err
                            tl.status = 3
                        elif isinstance(res, dict):
                            if res["status"]:
                                tl.status = 2
                                tl.message = ""
                                tl.progress = 100
                            else:
                                tl.message = res["msg"]
                                tl.status = 3
                        else:
                            tl.status = 2
                            tl.message = ""
                            tl.progress = 100

                        self.event_queue.put(tl)
                else:  # regular file upload
                    tl.status = 1
                    self.event_queue.put(tl)

                    def _call_log(progress, log):
                        tl.progress = progress
                        self.event_queue.put(tl)

                    err = srv.upload_file(
                        filename=tl.tf.src_file,
                        target_path=os.path.dirname(tl.tf.dst_file),
                        mode="cover",
                        call_log=_call_log)

                    if err:
                        tl.status = 3
                        tl.message = err
                    else:
                        tl.status = 2
                        tl.message = ""
                        tl.progress = 100

                    self.event_queue.put(tl)
            except Exception as e:
                err = traceback.format_exc()
                tl.status = 3
                tl.message = str(e) + "\n" + err
                self.event_queue.put(tl)

    def event_func(self):
        """Collector loop: batch log updates every 0.5 s and push snapshots."""
        fdb = TaskFlowsDB()
        last_time = time.time()
        tmp_dict = {}
        update_fields = ("status", "message", "progress", "completed_at", "started_at")
        # Sets deduplicate repeated events for the same log id.
        complete_set, error_set = set(), set()
        error_node_set = set()
        while True:
            try:
                tl: TransferLog = self.event_queue.get(timeout=0.1)
            except queue.Empty:
                # Only exit once the workers have signalled completion.
                if self.is_trans_end:
                    break
                else:
                    continue
            except Exception as e:
                print(e)
                break
            if tl.status in (2, 4):
                complete_set.add(tl.id)
                self.status_dict["complete"] = len(complete_set)
                if not tl.started_at:
                    tl.started_at = tl.started_at or datetime.now()
                tl.completed_at = tl.completed_at or datetime.now()
            elif tl.status == 3:
                error_set.add(tl.id)
                self.status_dict["error"] = len(error_set)
                tl.completed_at = datetime.now()
                error_node_set.add(tl.dst_node_idx)
            elif tl.status == 1:
                tl.started_at = datetime.now()

            tmp_dict[tl.id] = tl
            if time.time() - last_time > 0.5:
                fdb.TransferLog.bath_update(tmp_dict.values(), update_fields=update_fields)
                last_time = time.time()

                self.status_dict["data"] = [i.to_show_data() for i in tmp_dict.values()]
                self.status_dict["error_nodes"] = list(error_node_set)
                self.call_update(self.status_dict)
                tmp_dict.clear()

        # Final flush for anything still pending when the loop ends.
        if tmp_dict:
            fdb.TransferLog.bath_update(tmp_dict.values(), update_fields=update_fields)
            self.status_dict["data"] = [i.to_show_data() for i in tmp_dict.values()]
            self.status_dict["error_nodes"] = list(error_node_set)
            self.call_update(self.status_dict)

        fdb.close()
|
||||
|
||||
|
||||
# Executes a file transfer on a remote (source) node and mirrors its
# status stream back through the supplied update callback.
class NodeFiletransferTask(object):

    def __init__(self, task: "TransferTask",
                 call_update: Callable[[Any], None],
                 exclude_nodes: Optional[List[int]] = None,
                 the_log_id: Optional[int] = None,
                 ):
        """
        :param task: local TransferTask record proxying a task that runs on
                     the source node (see ``task.src_node_task_id``).
        :param call_update: callback receiving each normalized status dict.
        :param exclude_nodes: destination node ids to skip (defaults to none).
        :param the_log_id: retry a single log entry; non-int or negative
                           values are normalized to 0 (meaning "all entries").
        """
        self.task = task
        src_node = task.src_node
        self.exclude_nodes = exclude_nodes or []
        self.srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"], src_node["name"])
        self.the_log_id = max(the_log_id, 0) if isinstance(the_log_id, int) else 0
        self.call_update = call_update
        # Merged into every status payload so consumers can route it.
        self.default_status_data = {
            "task_id": self.task.id,
            "task_type": "file",
        }
        self.status_dict = dict()  # latest status snapshot

    def start(self):
        """Run the proxied transfer to completion and persist the final
        task status (1 = running, 2 = complete, 3 = error)."""
        fdb = TaskFlowsDB()
        self.task.status = 1
        fdb.TransferTask.update(self.task)
        err = self.srv.proxy_transferfile_status(self.task.src_node_task_id, self.exclude_nodes, self.the_log_id, self.handle_proxy_data)
        if err:
            self.task.status = 3
            # Guard an empty/None previous message: avoids a TypeError on
            # None and a leading ";" separator (was: message += ";" + err).
            self.task.message = (self.task.message + ";" + err) if self.task.message else err
        else:
            if self.status_dict and self.status_dict.get("error", 0):
                self.task.status = 3
            else:
                self.task.status = 2
            # An error message recorded by handle_proxy_data also marks failure.
            if self.task.message:
                self.task.status = 3

        fdb.TransferTask.update(self.task)

    def handle_proxy_data(self, data):
        """Parse one JSON line of the remote status stream, cache the
        normalized snapshot in ``self.status_dict`` and forward it to
        ``call_update``. Handshake frames and plain error messages are
        consumed without emitting a snapshot."""
        ret = {"count": 0, "complete": 0, "error": 0, "error_nodes": [], "data": []}
        try:
            data_dict = json.loads(data)
            if "type" not in data_dict:
                return

            if data_dict["type"] == "status":
                if "init" in data_dict["data"]:  # skip the initial handshake frame
                    return
                ret.update(data_dict["data"])
                ret.update(self.default_status_data)
            else:  # end / error frame: carries history data or an error message
                if "data" in data_dict:
                    ret.update(data_dict["data"])
                    ret.update(self.default_status_data)
                elif "msg" in data_dict:
                    self.task.message = data_dict["msg"]
                    return
        except Exception:  # narrowed from bare except: let SystemExit/KeyboardInterrupt propagate
            print(traceback.format_exc())
            ret["data"].append({"message": "Data source node execution transmission exception, please check if the node is functioning properly"})
            ret.update(self.default_status_data)

        self.status_dict = ret
        self.call_update(ret)
||||
# Runs a file transfer locally and publishes its status back to the
# remote node through a unix-socket status server.
class SelfFiletransferTask(object):

    def __init__(self, task_id: int, exclude_nodes: List[int] = None, the_log_id: int = None):
        sock_path = "{}/file_task_{}".format(_SOCKET_FILE_DIR, task_id)
        self.status_server = StatusServer(self.get_status, sock_path)
        self.f_task = FiletransferTask(task_id, self.update_status, exclude_nodes, the_log_id)

    @staticmethod
    def get_status(init: bool = False) -> Dict:
        """Initial status payload handed to newly connected status clients."""
        return {"init": True}

    def start_status_server(self):
        """Spawn the status server in a daemon thread and register cleanup."""
        server_thread = threading.Thread(target=self.status_server.start_server, daemon=True)
        server_thread.start()
        register_cleanup(self.status_server)

    def update_status(self, update_data: Dict):
        """Forward a status snapshot to connected clients."""
        self.status_server.update_status(update_data)

    def start(self):
        """Bring the status server up, then run the transfer to completion."""
        self.start_status_server()
        self.f_task.start()
|
||||
def self_file_running_log(task_id: int, call_log: Callable[[Union[str, dict]], None], timeout: float = 3.0) -> str:
    """Attach to the status socket of a locally running file task and stream
    its updates into ``call_log``.

    Returns an empty string on success, or an error message if the task's
    socket does not appear within ``timeout`` seconds.
    """
    sock_path = "{}/file_task_{}".format(_SOCKET_FILE_DIR, task_id)
    # Poll for the socket file; the task process creates it on startup.
    while not os.path.exists(sock_path):
        if timeout <= 0:
            return "Task startup timeout"
        timeout -= 0.05
        time.sleep(0.05)

    client = StatusClient(sock_path, callback=call_log)
    client.connect()
    client.wait_receive()
    return ""
|
||||
|
||||
# Synchronously retry a file-transfer task (or a single failed log entry).
def file_task_run_sync(task_id: int, log_id: int) -> Union[str, Dict[str, Any]]:
    """Re-run a transfer task synchronously.

    Returns an error string on validation failure, otherwise the final
    status dict of the re-run task.
    """
    fdb = TaskFlowsDB()
    task = fdb.TransferTask.get_byid(task_id)
    if not task:
        return "Task does not exist"

    # Task whose source is a remote node: proxy the whole run there.
    if task.src_node_task_id > 0:
        remote_task = NodeFiletransferTask(task, print, exclude_nodes=[], the_log_id=log_id)
        remote_task.start()
        return remote_task.status_dict

    # Local task: retry exactly one failed (status == 3) log entry.
    if not log_id:
        return "The log ID cannot be empty"
    log = fdb.TransferLog.get_byid(log_id)
    if not log:
        return "log does not exist"

    if log.status != 3:
        return "The task status is not abnormal, no need to retry"

    if log.transfer_task_id != task_id:
        return "The log ID does not match the task ID"

    # NOTE(review): FiletransferTask receives the task object here but a
    # task_id int in SelfFiletransferTask.__init__ — confirm which argument
    # type its constructor expects.
    local_task = FiletransferTask(task, print, exclude_nodes=[], the_log_id=log_id)
    local_task.start()
    return local_task.status_dict
|
||||
152
mod/project/node/task_flow/flow.py
Normal file
152
mod/project/node/task_flow/flow.py
Normal file
@@ -0,0 +1,152 @@
|
||||
import json
import os
import threading
import queue
import time
import traceback
from typing import List, Dict, Callable, Any, Union, Optional, Tuple

from mod.base.ssh_executor import SSHExecutor
# Single import line (a duplicate "from ... import TaskFlowsDB" was removed).
from mod.project.node.dbutil import ServerNodeDB, CommandTask, CommandLog, TaskFlowsDB, TransferTask
from mod.project.node.nodeutil import LPanelNode, ServerNode, SSHApi
from mod.project.node.filetransfer.socket_server import StatusServer, StatusClient, register_cleanup

from .command_task import CMDTask
from .file_task import FiletransferTask, NodeFiletransferTask

# Directory holding the unix sockets used to publish flow/task status.
_SOCKET_FILE_DIR = "/tmp/flow_task"
# exist_ok avoids the check-then-mkdir race when two tasks start at once.
os.makedirs(_SOCKET_FILE_DIR, exist_ok=True)
|
||||
|
||||
|
||||
|
||||
class FlowTask:
    """Executes one task flow: an ordered list of command and file-transfer
    steps fanned out to a set of server nodes.

    Step status snapshots are published through a unix-socket StatusServer
    so other processes (e.g. the panel UI) can attach and stream progress.
    Error handling is driven by two flags read from ``flow.strategy``:
    ``run_when_error`` (keep going after a failed step) and
    ``exclude_when_error`` (drop failed nodes from later steps).
    """

    def __init__(self, flow_id: int, step_idx: int=0, sub_id: int=0):
        # NOTE(review): step_idx and sub_id are currently unused in this
        # class — presumably reserved for resuming mid-flow; confirm.
        self._fdb = TaskFlowsDB()
        self.flow = self._fdb.Flow.get_byid(flow_id)
        if not self.flow:
            raise RuntimeError("Task does not exist")

        # All steps of the flow, both command and transfer kinds.
        self.steps: List[Union[CommandTask, TransferTask]] = [
            *self._fdb.CommandTask.query("flow_id = ?", (flow_id,)),
            *self._fdb.TransferTask.query("flow_id = ?", (flow_id,))
        ]

        # Execute in declared order regardless of step kind.
        self.steps.sort(key=lambda x: x.step_index, reverse=False)

        if not self.steps:
            raise RuntimeError("The task content does not exist")
        self.now_idx = 1  # 1-based index of the step currently executing
        # Whether to continue executing after any step reports an error.
        self.run_when_error = False
        if self.flow.strategy.get("run_when_error", False):
            self.run_when_error = True
        # Whether a node that errored is skipped in all subsequent steps.
        self.exclude_when_error = True
        if not self.flow.strategy.get("exclude_when_error", True):
            self.exclude_when_error = False

        self.status_server = StatusServer(self.get_status, (_SOCKET_FILE_DIR + "/flow_task_" + str(flow_id)))
        # Target node ids parsed from the "|"-separated server_ids string.
        self.flow_all_nodes = set([int(i) for i in self.flow.server_ids.split("|") if i and i.isdigit()])

    def get_status(self, init: bool = False):
        """Snapshot handed to status-server clients: the flow record plus
        per-step display data and the current step index. ``init`` is
        accepted for the StatusServer callback signature but unused here."""
        flow_data = self.flow.to_dict()
        flow_data["steps"] = [x.to_show_data() for x in self.steps]
        flow_data["now_idx"] = self.now_idx
        return flow_data

    def start_status_server(self):
        """Spawn the status server in a daemon thread and register cleanup."""
        t = threading.Thread(target=self.status_server.start_server, args=(), daemon=True)
        t.start()
        register_cleanup(self.status_server)

    def update_status(self, update_data: Dict):
        """Push a status snapshot to connected status clients."""
        self.status_server.update_status(update_data)

    def _run(self) -> bool:
        """Execute all steps in order.

        Returns True only if every executed step finished without errors.
        Returns False early when a step errors and ``run_when_error`` is off.
        """
        def call_log(log_data):
            self.update_status(log_data)

        all_status = True  # True while no step has reported an error
        error_nodes = set()  # nodes excluded after a failure
        for step in self.steps:
            if not (self.flow_all_nodes - error_nodes):  # no runnable node left
                continue
            if isinstance(step, CommandTask):
                if step.status != 2:  # skip steps already completed
                    has_err, task_error_nodes = self.run_cmd_task(step, call_log, exclude_nodes=list(error_nodes))
                    all_status = all_status and not has_err
                    if has_err and not self.run_when_error:
                        return False
                    if self.exclude_when_error and task_error_nodes:
                        error_nodes.update(task_error_nodes)
            elif isinstance(step, TransferTask):
                if step.status != 2:  # skip steps already completed
                    has_err, task_error_nodes = self.run_transfer_task(step, call_log, exclude_nodes=list(error_nodes))
                    all_status = all_status and not has_err
                    if has_err and not self.run_when_error:
                        return False
                    if self.exclude_when_error and task_error_nodes:
                        error_nodes.update(task_error_nodes)
            self.now_idx += 1
        return all_status

    def start(self):
        """Run the whole flow: mark it running, execute the steps, then
        persist the final "complete"/"error" status and stop the server."""
        self.start_status_server()

        self.flow.status = "running"
        self._fdb.Flow.update(self.flow)
        all_status = self._run()
        self.flow.status = "complete" if all_status else "error"
        self._fdb.Flow.update(self.flow)

        self.status_server.stop()
        # fdb = TaskFlowsDB()
        # print(fdb.history_flow_task(self.flow.id))
        return

    @staticmethod
    def run_cmd_task(task: CommandTask, call_log: Callable[[Any], None], exclude_nodes: List[int] = None) -> Tuple[bool, List[int]]:
        """Run one command step; returns (had_errors, failed_node_ids)."""
        task = CMDTask(task, 0, call_log, exclude_nodes=exclude_nodes)
        task.start()
        return task.status_dict["error"] > 0, task.status_dict["error_nodes"]

    @staticmethod
    def run_transfer_task(task: TransferTask, call_log: Callable[[Any], None], exclude_nodes: List[int] = None) -> Tuple[bool, List[int]]:
        """Run one transfer step; returns (had_errors, failed_node_ids).
        Dispatches to the remote-proxy runner when the task originates on
        another node (src_node_task_id != 0), otherwise runs locally."""
        if task.src_node_task_id != 0:
            task = NodeFiletransferTask(task, call_log, exclude_nodes=exclude_nodes, the_log_id=None)
            task.start()
            return task.status_dict["error"] > 0, task.status_dict["error_nodes"]
        else:
            task = FiletransferTask(task, call_log, exclude_nodes=exclude_nodes)
            task.start()
            return task.status_dict["error"] > 0, task.status_dict["error_nodes"]
||||
|
||||
def flow_running_log(task_id: int, call_log: Callable[[Union[str, dict]], None], timeout: float = 3.0) -> str:
    """Attach to a running flow's status socket and stream updates into
    ``call_log``.

    Returns an empty string on success, or an error message if the flow's
    socket does not appear within ``timeout`` seconds.
    """
    sock_path = "{}/flow_task_{}".format(_SOCKET_FILE_DIR, task_id)
    # Poll for the socket file; the flow process creates it on startup.
    while not os.path.exists(sock_path):
        if timeout <= 0:
            return "Task startup timeout"
        timeout -= 0.05
        time.sleep(0.05)

    client = StatusClient(sock_path, callback=call_log)
    client.connect()
    client.wait_receive()
    return ""
|
||||
|
||||
def flow_useful_version(ver: str) -> bool:
    """Return True when panel version ``ver`` (e.g. "11.4.2") supports
    task flows, i.e. the version is 11.4 or newer.

    Malformed input (non-numeric parts, missing minor component, None)
    is treated as unsupported and returns False.
    """
    try:
        parts = [int(i) for i in ver.split(".")]
        if parts[0] > 11:
            return True
        if parts[0] == 11 and parts[1] >= 4:
            return True
    # Narrowed from a bare ``except``: only parsing/shape errors mean
    # "unsupported"; SystemExit/KeyboardInterrupt now propagate.
    except (ValueError, IndexError, AttributeError, TypeError):
        pass
    return False
|
||||
Reference in New Issue
Block a user