Initial YakPanel commit

This commit is contained in:
Niranjan
2026-04-07 02:04:22 +05:30
commit 2826d3e7f3
5359 changed files with 1390724 additions and 0 deletions

View File

@@ -0,0 +1,11 @@
# Package export surface for the node API helpers.
from .base import *
from .ssh_wrap import SSHApi

# Names re-exported to clients doing `from <package> import *`.
# All but SSHApi are expected to come from .base -- confirm against .base.
__all__ = [
    "ServerNode",
    "LocalNode",
    "LPanelNode",
    "monitor_node_once_with_timeout",
    "monitor_node_once",
    "SSHApi"
]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,573 @@
import os.path
import shutil
import traceback
from uuid import uuid4
import requests
import time
import hashlib
import json
from typing import Optional, List, Any, Tuple, Dict
class OnePanelApiClient:
    """Client for the 1Panel management API (v1 and v2).

    Every request is signed with the 1Panel scheme: a ``1Panel-Token``
    header holding ``md5("1panel" + api_key + timestamp)`` together with
    the matching ``1Panel-Timestamp`` header.
    """

    def __init__(self, panel_address, api_key, ver: str = "v2", timeout: int = 20):
        """Initialize the 1Panel API client.

        Args:
            panel_address (str): base URL of the 1Panel instance
                (e.g. "http://your_server_ip:4004").
            api_key (str): the 1Panel API key.
            ver (str): API version path segment, "v1" or "v2".
            timeout (int): per-request timeout in seconds.
        """
        self.panel_address = panel_address
        self.api_key = api_key
        self.ver = ver
        self.timeout = timeout
        # Last exception raised inside _call_api(); inspected by test_ver().
        self._call_err: Optional[Exception] = None

    def _generate_token(self):
        """Return the (token, timestamp) pair used to sign one request."""
        timestamp = str(int(time.time()))
        sign_string = f"1panel{self.api_key}{timestamp}"
        md5_hash = hashlib.md5(sign_string.encode()).hexdigest()
        return md5_hash, timestamp

    def _call_api(self, method, endpoint, json_data=None):
        """Send a signed JSON request; return the decoded body or None.

        Best-effort semantics: any failure is printed, remembered in
        ``self._call_err`` and reported as None (callers rely on this).
        """
        token, timestamp = self._generate_token()
        headers = {
            "1Panel-Token": token,
            "1Panel-Timestamp": timestamp,
            "Content-Type": "application/json"
        }
        url = "{}{}".format(self.panel_address, endpoint)
        try:
            response = requests.request(method, url, headers=headers, json=json_data, timeout=self.timeout)
            response.raise_for_status()  # raise on HTTP 4xx / 5xx
            print(f"API Response Status: {response.status_code}")
            return response.json()
        except requests.exceptions.RequestException as e:
            self._call_err = e
            print(f"API call failed: {e}")
            return None
        except Exception as e:
            # json.JSONDecodeError lands here; test_ver() uses it to detect
            # a v1 panel answering a v2 endpoint with a non-JSON body.
            self._call_err = e
            print(f"API call failed: {e}")
            return None

    def add_website(self, site_name: str, port: int, **kwargs):
        """Create a static website named *site_name* listening on *port*.

        kwargs: ``ps`` overrides the default remark text.
        """
        endpoint = "/api/{}/websites".format(self.ver)
        return self._call_api("POST", endpoint, json_data={
            "primaryDomain": site_name,
            "type": "static",
            "alias": site_name,
            "remark": kwargs.get("ps") or "Pagoda yakpanel load balancing station",
            "appType": "installed",
            "webSiteGroupId": 1,
            "otherDomains": "",
            "proxy": "",
            "appinstall": {
                "appId": 0,
                "name": "",
                "appDetailId": 0,
                "params": {},
                "version": "",
                "appkey": "",
                "advanced": False,
                "cpuQuota": 0,
                "memoryLimit": 0,
                "memoryUnit": "MB",
                "containerName": "",
                "allowPort": False
            },
            "IPV6": False,
            "enableFtp": False,
            "ftpUser": "",
            "ftpPassword": "",
            "proxyType": "tcp",
            "port": 9000,
            "proxyProtocol": "http://",
            "proxyAddress": "",
            "runtimeType": "php",
            "taskID": str(uuid4()),
            "createDb": False,
            "dbName": "",
            "dbPassword": "",
            "dbFormat": "utf8mb4",
            "dbUser": "",
            "dbType": "mysql",
            "dbHost": "",
            "enableSSL": False,
            "domains": [
                {
                    "domain": site_name,
                    "port": port,
                    "ssl": False
                }
            ],
            "siteDir": ""
        })

    def check_site_create(self, site_name: str) -> Optional[int]:
        """Return the id of the website whose alias is *site_name*, else None."""
        endpoint = "/api/{}/websites/search".format(self.ver)
        res_data = self._call_api("POST", endpoint, json_data={
            "name": site_name,
            "page": 1,
            "pageSize": 10,
            "orderBy": "favorite",
            "order": "descending",
            "websiteGroupId": 0,
            "type": "static"
        })
        if res_data is not None and isinstance(res_data.get("data"), dict):
            # "items" may be null in the API response; guard the iteration
            # (the original iterated it unconditionally and could raise).
            for item in res_data["data"].get("items") or []:
                if item["alias"] == site_name:
                    return item["id"]
        return None

    def get_websites(self):
        """Return the website list payload, or None on failure."""
        endpoint = "/api/{}/websites/list".format(self.ver)
        return self._call_api("GET", endpoint)

    def add_website_domain(self, website_id: int, new_domain: str, port: int):
        """Attach *new_domain*:*port* (no SSL) to website *website_id*."""
        endpoint = "/api/{}/websites/domains".format(self.ver)
        return self._call_api("POST", endpoint, json_data={
            "websiteID": website_id,
            "domains": [
                {
                    "domain": new_domain,
                    "port": port,
                    "ssl": False
                }
            ],
            "domainStr": ""
        })

    def website_domains(self, website_id: int):
        """Return the domain list of website *website_id*."""
        endpoint = "/api/{}/websites/domains/{}".format(self.ver, website_id)
        return self._call_api("GET", endpoint)

    def list_file_test(self):
        """Minimal files/search probe; used by test_ver() to detect the API version."""
        endpoint = "/api/{}/files/search".format(self.ver)
        return self._call_api("POST", endpoint, json_data={
            "containSub": False,
            "dir": True,
            "expand": True,
            "isDetail": True,
            "page": 0,
            "pageSize": 0,
            "path": "/",
            "search": "",
            "showHidden": True,
            "sortBy": "",
            "sortOrder": ""
        })

    def list_file(self, path: str) -> Tuple[List[Dict], str]:
        """List up to 1000 entries of *path*; return (items, error_message)."""
        endpoint = "/api/{}/files/search".format(self.ver)
        res = self._call_api("POST", endpoint, json_data={
            "containSub": False,
            "expand": True,
            "isDetail": True,
            "page": 1,
            "pageSize": 1000,
            "path": path,
            "search": "",
            "showHidden": True,
            "sortBy": "name",
            "sortOrder": "ascending"
        })
        if res is None:
            return [], "Failed to retrieve file list"
        if res["code"] != 200:
            return [], res["message"]
        if res["data"]["itemTotal"] > 1000:
            return [], "The number of directory files exceeds 1000, please compress before operating"
        elif res["data"]["itemTotal"] == 0:
            return [], ""
        # "items" can be null even when itemTotal > 0.
        return [] if res["data"]["items"] is None else res["data"]["items"], ""

    def files_search(self, path: str, page: int, page_size: int, search: str):
        """Paged file search under *path*; return (data dict, error_message)."""
        endpoint = "/api/{}/files/search".format(self.ver)
        res = self._call_api("POST", endpoint, json_data={
            "containSub": False,
            "expand": True,
            "isDetail": True,
            "page": page,
            "pageSize": page_size,
            "path": path,
            "search": search,
            "showHidden": True,
            "sortBy": "name",
            "sortOrder": "ascending"
        })
        if res is None:
            return {}, "Failed to retrieve file list"
        elif res["code"] != 200:
            return {}, res["message"]
        return res["data"], ""

    def test_ver(self) -> bool:
        """Probe the panel version, falling back from v2 to v1.

        A v1 panel answers the v2 endpoint with a non-JSON body, which
        surfaces as a json.JSONDecodeError in _call_api. Side effect:
        ``self.ver`` is left at the working version.
        """
        self.ver = "v2"
        self._call_err = None
        res_data = self.list_file_test()
        if res_data is None and isinstance(self._call_err, json.JSONDecodeError):
            self.ver = "v1"
            res_data = self.list_file_test()
        return isinstance(res_data, dict)

    def system_status(self):
        """Fetch the dashboard status (POST body on v1, GET path on v2)."""
        endpoint = "/api/{}/dashboard/current".format(self.ver)
        if self.ver == "v1":
            return self._call_api("POST", endpoint, json_data={
                "scope": "basic",
                "ioOption": "all",
                "netOption": "all"
            })
        else:
            return self._call_api("GET", endpoint + "/all/all")

    def open_port(self, port: int, protocol: str):
        """Open *port*/*protocol* in the host firewall (accept from anywhere)."""
        endpoint = "/api/{}/hosts/firewall/port".format(self.ver)
        return self._call_api("POST", endpoint, json_data={
            "protocol": protocol,
            "source": "anyWhere",
            "strategy": "accept",
            "port": str(port),
            # NOTE(review): placeholder description kept as-is -- the panel
            # may display it; confirm before changing.
            "description": "aaaa",
            "operation": "add",
            "address": ""
        })

    def ws_shell(self, work_dir: str, cmd: str) -> Optional[str]:
        """Run *cmd* in *work_dir* through the panel terminal websocket (v2 only).

        Returns the captured command output with ANSI control lines removed,
        or None on any failure.
        """
        import websocket
        import base64
        import threading
        from urllib.parse import urlencode, urlparse
        if self.ver != "v2":
            return None
        try:
            # Clear the prompt and disable echo so only command output is
            # received. (Fixed: the original passed a stray unused format arg.)
            pre_command = "PS1= && stty -echo && clear && cd {}".format(work_dir)
            p = {
                "cols": 80,
                "rows": 24,
                "command": pre_command,
                "operateNode": "local"
            }
            token, timestamp = self._generate_token()
            u = urlparse(self.panel_address)
            url = ("{}://{}/api/{}/hosts/terminal?{}".format
                   ("ws" if u.scheme == "http" else "wss", u.netloc, self.ver, urlencode(p)))
            ws = websocket.WebSocket()
            ws.connect(url, header={"1Panel-Token": token, "1Panel-Timestamp": timestamp, })
            if not cmd.endswith("\n"):
                cmd += "\n"
            ws.send(json.dumps({"type": "cmd", "data": base64.b64encode(cmd.encode("utf-8")).decode("utf-8")}))
            res_str = ""
            wait = False

            def close_timeout():
                # Watchdog: if a recv() blocks for 5s, drop the socket so
                # the read loop terminates.
                time.sleep(5)
                if wait:
                    ws.close()

            threading.Thread(target=close_timeout).start()
            while True:
                wait = True
                result = ws.recv()
                wait = False
                if result == "":
                    break
                res_data = json.loads(result)
                if res_data["type"] == "cmd":
                    res_str += base64.b64decode(res_data["data"]).decode("utf-8")
            # Drop everything echoed up to and including the setup command.
            if pre_command in res_str:
                res_str = res_str[res_str.index(pre_command) + len(pre_command):]
            res_str = res_str.strip()
            real_data = []
            for line in res_str.split("\r\n"):
                # Skip terminal escape-sequence lines. (Fixed: the original
                # indexed line[0] and crashed on empty lines; a leftover
                # debug write to "test.txt" was also removed.)
                if line.startswith('\x1b'):
                    continue
                real_data.append(line)
            return "\n".join(real_data)
        except Exception as e:
            print("error{}".format(str(e)))
            traceback.print_exc()
            return None

    def chunkupload(self,
                    upload_name: str,
                    target_path: str,
                    chunk: Any, chunk_index: int, chunk_count: int) -> Tuple[str, Optional[dict]]:
        """Upload one chunk of *upload_name*; return (error_message, response).

        An empty error string means success; the response dict is None when
        the body was too short to be JSON.
        """
        token, timestamp = self._generate_token()
        header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
        files = {'chunk': ("chunk", chunk, 'application/octet-stream')}
        data = {
            'path': target_path,
            'filename': upload_name,
            'chunkIndex': chunk_index,
            'chunkCount': chunk_count,
        }
        url = "{}/api/{}/files/chunkupload".format(self.panel_address, self.ver)
        try:
            # NOTE(review): verify=False disables TLS verification.
            resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
            if not resp.status_code == 200:
                return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                    resp.status_code, resp.text), None
            return "", None if len(resp.text) < 3 else json.loads(resp.text)
        except Exception as e:
            return "Upload file: {} failed with error message:{}".format(upload_name, str(e)), None

    def upload(self, filename: str, target_path: str, upload_name: str) -> str:
        """Upload local *filename* as *upload_name* into *target_path*.

        Returns "" on success or an error message. Note: the whole file is
        read into memory before sending.
        """
        token, timestamp = self._generate_token()
        header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
        try:
            with open(filename, 'rb') as f:
                file_data = f.read()
        except Exception as e:
            return "File {} failed to open, please check file permissions, error message is:{}".format(filename, str(e))
        files = {'file': (upload_name, file_data, 'application/octet-stream')}
        data = {
            'path': target_path,
            'overwrite': True
        }
        url = "{}/api/{}/files/upload".format(self.panel_address, self.ver)
        try:
            resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
            if not resp.status_code == 200:
                return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                    resp.status_code, resp.text)
            if not resp.json()["code"] == 200:
                return "Upload file failed with error message:{}".format(resp.json()["message"])
            return ""
        except Exception as e:
            return "Upload file: {} failed with error message:{}".format(filename, str(e))

    def files_exits(self, paths: List[str]) -> Optional[dict]:
        """Batch existence/size check for *paths* (name kept for compatibility)."""
        endpoint = "/api/{}/files/batch/check".format(self.ver)
        return self._call_api("POST", endpoint, json_data={
            "paths": paths,
        })

    def download_file(self, filename: str, target_path: str, download_name: str, **kwargs) -> str:
        """Download the remote *filename* in 5 MiB ranged chunks.

        The file lands in *target_path*/*download_name* via a temporary
        file that is renamed on success. Returns "" on success or an error
        message. kwargs: ``call_log(progress, message)`` progress callback.
        """
        data = self.files_exits([filename])
        file_size: Optional[int] = None
        if not isinstance(data, dict):
            return "Request file: {} status failed".format(filename)
        for i in data["data"]:
            if i["path"] == filename:
                file_size = i["size"]
                break
        if file_size is None:
            return "File {} does not exist, skip download".format(filename)
        try:
            if not os.path.isdir(target_path):
                os.makedirs(target_path)
        except Exception as e:
            return "Failed to create folder {}, please check folder permissions, error message is:{}".format(target_path, str(e))
        if file_size == 0:
            # Zero-byte remote file: just create an empty local file.
            open(os.path.join(target_path, download_name), "w").close()
            return ""
        tmp_file = os.path.join(target_path, "{}.{}".format(download_name, uuid4().hex))
        try:
            # (target_path was already created above; the original re-checked it here.)
            fb = open(tmp_file, 'wb')
        except Exception as e:
            return "Failed to create temporary file {}, please check folder permissions, error message is:{}".format(tmp_file, str(e))
        call_log = lambda *args, **keyword_args: None
        if "call_log" in kwargs and callable(kwargs["call_log"]):
            call_log = kwargs["call_log"]
        try:
            for i in range(0, file_size, 1024 * 1024 * 5):
                start = i
                end = min(i + 1024 * 1024 * 5 - 1, file_size - 1)
                url = "{}/api/{}/files/chunkdownload".format(self.panel_address, self.ver)
                data = {
                    'path': filename,
                    'name': os.path.basename(filename),
                }
                token, timestamp = self._generate_token()
                header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
                header.update({"Range": "bytes={}-{}".format(start, end)})
                resp = requests.post(url, json=data, headers=header, verify=False, stream=True, timeout=self.timeout)
                if resp.status_code != 206:
                    return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the response header for the returned message is:{}".format(
                        resp.status_code, resp.headers)
                fb.write(resp.content)
                # NOTE(review): end // file_size is always 0 here (end is at
                # most file_size - 1); confirm the callback's expected
                # progress scale before changing it.
                call_log(end // file_size, "File Download{} -> {}, Downloaded size{}".format(filename, target_path, end))
                fb.flush()
            if fb.tell() != file_size:
                print(fb.tell(), file_size)
                return "Download file {} failed with error message:{}".format(filename, "files are different sizes")
            else:
                fb.close()
                shutil.move(tmp_file, os.path.join(target_path, download_name))
                return ""
        except Exception as e:
            return "Download file {} failed with error message:{}".format(filename, str(e))
        finally:
            if not fb.closed:
                fb.close()
            # On success the temp file has already been moved away.
            if os.path.exists(tmp_file):
                os.remove(tmp_file)

    def dir_walk(self, path: str) -> Tuple[List[dict], str]:
        """Breadth-first walk of *path*; return (entries, error_message).

        Entries are dicts with "path"/"size"/"is_dir"; empty directories are
        listed first. Aborts past 1000 files.
        """
        dirs = [path]
        res = []
        count = 0
        empty_dir = []
        while dirs:
            dir_path = dirs.pop(0)
            try:
                files, err = self.list_file(dir_path)
            except Exception as e:
                return [], str(e)
            if err:
                return [], err
            if not files:
                empty_dir.append(dir_path)
            for i in files:
                if i["isDir"]:
                    dirs.append(i["path"])
                else:
                    res.append({
                        "path": i["path"],
                        "size": i["size"],
                        "is_dir": 0
                    })
                count += 1
                if count > 1000:
                    return [], "The number of directory files exceeds 1000, please compress before operating"
        return [{"path": i, "size": 0, "is_dir": 1} for i in empty_dir] + res, ""

    def remove_file(self, path: str, is_dir: bool) -> Optional[dict]:
        """Delete *path* (non-forced). Returns the API response dict or None.

        (Fixed: the original annotated the return as ``str``, but _call_api
        returns a dict or None.)
        """
        return self._call_api("POST", "/api/{}/files/del".format(self.ver), json_data={
            "isDir": is_dir,
            "path": path,
            "forceDelete": False
        })

    def download_proxy(self, filename: str):
        """Stream the remote *filename* through to a Flask client.

        Returns a Flask Response streaming the download, or an error string
        on failure.
        """
        try:
            url = "{}/api/{}/files/download".format(self.panel_address, self.ver)
            token, timestamp = self._generate_token()
            header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
            resp = requests.get(url, params={
                "operateNode": "local",
                "path": filename
            }, headers=header, stream=True, verify=False, timeout=self.timeout)
            if not resp.status_code == 200:
                return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                    resp.status_code, resp.text)
            from flask import send_file, stream_with_context, Response
            filename = os.path.basename(filename)
            # Prefer the server-provided filename when it supplies one.
            if resp.headers.get("Content-Disposition", "").find("filename=") != -1:
                filename = resp.headers.get("Content-Disposition", "").split("filename=")[1]

            def generate():
                for chunk in resp.iter_content(chunk_size=1024 * 1024 * 5):
                    if chunk:
                        yield chunk

            # Forward the relevant response headers.
            headers = {
                'Content-Type': resp.headers.get('Content-Type', 'application/octet-stream'),
                'Content-Disposition': 'attachment; filename="{}"'.format(filename),
                'Content-Length': resp.headers.get('Content-Length', ''),
                'Accept-Ranges': 'bytes'
            }
            # stream_with_context keeps the request context alive while the
            # generator runs.
            return Response(
                stream_with_context(generate()),
                headers=headers,
                direct_passthrough=True
            )
        except Exception as e:
            return "Download file: {} failed with error message:{}".format(filename, traceback.format_exc())

    def dir_size(self, path: str):
        """Return the API response for the recursive size of *path*."""
        return self._call_api("POST", "/api/{}/files/size".format(self.ver), json_data={
            "path": path
        })

    def get_sshd_config(self) -> Optional[dict]:
        """Return the sshd settings dict, or None on failure."""
        res = self._call_api("POST", "/api/{}/hosts/ssh/search".format(self.ver))
        if res is None:
            return None
        if res["code"] == 200:
            return res.get("data", {})
        return None

    def create_dir(self, path: str):
        """Create directory *path* on the panel host (mode left to the panel)."""
        return self._call_api("POST", "/api/{}/files".format(self.ver), {
            "content": "",
            "isDir": True,
            "isLink": False,
            "isSymlink": False,
            "linkPath": "",
            "mode": 0,
            "path": path,
            "sub": False
        })

    def restart_panel(self):
        """Restart the 1Panel service itself."""
        return self._call_api("POST", "/api/{}/dashboard/system/restart/{}".format(self.ver, "1panel"))

    def server_reboot(self):
        """Reboot the whole host."""
        return self._call_api("POST", "/api/{}/dashboard/system/restart/{}".format(self.ver, "system"))

    def get_file_body(self, path: str) -> Tuple[Optional[dict], str]:
        """Fetch the content payload of *path*; return (data, error_message)."""
        res = self._call_api("POST", "/api/{}/files/content".format(self.ver), json_data={
            "path": path,
            "expand": True,
            "isDetail": False,
            "page": 1,
            "pageSize": 100
        })
        if res is None:
            return None, "Failed to retrieve file content"
        if res["code"] == 200:
            return res.get("data", {}), ""
        return None, res.get("message")

View File

@@ -0,0 +1,129 @@
import json
from typing import Optional, Union, Tuple, List, Any, Dict
from .base import ServerNode, LocalNode
import public
class _RsyncAPIBase:
def has_rsync_perm(self) -> bool:
raise NotImplementedError()
def is_setup_rsync(self) -> bool:
raise NotImplementedError()
def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
raise NotImplementedError()
def add_send_task(self, sou):
pass
def get_secretkey(self, ip_type: str = "local_ip") -> Tuple[str, str]:
pass
def check_receiver_conn(self, secret_key: str, work_type: int) -> Tuple[Dict, str]:
pass
class BtLocalRsyncAPI(LocalNode, _RsyncAPIBase):
    """rsync plugin client for the local node (direct panelPlugin calls)."""

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtLocalRsyncAPI']:
        """Return a client when *node_id* refers to the local node, else None.

        Fix: the original referenced ``self`` inside this classmethod, which
        is undefined and raised NameError; ``cls`` is used instead (assumes
        ``node_db_obj`` is a class-level attribute on LocalNode -- confirm).
        """
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None
        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return BtLocalRsyncAPI()
        return None

    @staticmethod
    def _plugin_func(func_name: str, **kwargs) -> Any:
        """Invoke endpoint *func_name* of the rsync plugin through panelPlugin."""
        from panelPlugin import panelPlugin
        return panelPlugin().a(public.to_dict_obj({
            "name": "rsync",
            "s": func_name,
            **kwargs,
        }))

    def has_rsync_perm(self) -> bool:
        """True when the plugin call reports rsync as available."""
        from panelPlugin import panelPlugin
        res = panelPlugin().a(public.to_dict_obj({"name": "rsync"}))
        if not res["status"]:
            return False
        return True

    def is_setup_rsync(self) -> bool:
        """True when the rsync plugin is installed locally."""
        from panelPlugin import panelPlugin
        res = panelPlugin().get_soft_find(public.to_dict_obj({"sName": "rsync"}))
        try:
            return res["setup"]
        except (KeyError, TypeError):
            # Narrowed from a bare except: missing key or non-dict result.
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module for *path*; returns (plugin result, "")."""
        res = self._plugin_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
        return res, ""
class BtRsyncAPI(ServerNode, _RsyncAPIBase):
    """rsync plugin client for a remote aaPanel/BT node (HTTP plugin API)."""

    def _plugin_api_func(self, func_name: str, **kwargs) -> Tuple[Any, str]:
        """Call endpoint *func_name* of the remote rsync plugin."""
        return self._request("/plugin", "a", pdata={
            "name": "rsync",
            "s": func_name,
            **kwargs
        })

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtRsyncAPI']:
        """Return a remote client for *node_id*, or None for local/lpanel nodes.

        Fix: the original referenced ``self`` inside this classmethod, which
        is undefined and raised NameError; ``cls`` is used instead (assumes
        ``node_db_obj`` is a class-level attribute on ServerNode -- confirm).
        """
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None
        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return None
        if node_data['lpver']:
            return None
        return BtRsyncAPI(node_data["address"], node_data["api_key"], "")

    def has_rsync_perm(self) -> bool:
        """True when the remote plugin list reports rsync as available."""
        data, err = self._request("/plugin", "a", pdata={"name": "rsync"})
        if err:
            return False
        return data["status"]

    def is_setup_rsync(self) -> bool:
        """True when the rsync plugin is installed on the remote node."""
        data, err = self._request("/plugin", "get_soft_find", pdata={"sName": "rsync"})
        if err:
            return False
        try:
            return data["setup"]
        except (KeyError, TypeError):
            # Narrowed from a bare except: missing key or non-dict result.
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module for *path*; returns (result, error string)."""
        return self._plugin_api_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
def get_rsync_api_node(node_id: int) -> Optional[Union['BtRsyncAPI', 'BtLocalRsyncAPI']]:
    """Resolve *node_id* to the local rsync client when possible, otherwise
    fall back to a remote client (which may also be None)."""
    local_client = BtLocalRsyncAPI.new_by_id(node_id)
    return local_client or BtRsyncAPI.new_by_id(node_id)

View File

@@ -0,0 +1,783 @@
#!/bin/bash
# 确保输出是纯JSON不包含其他信息
export LANG=C
export LC_ALL=C
# 定义临时文件路径
NETWORK_DATA_FILE="/tmp/system_network_data_$(id -u).json"
# Collect per-interface network counters and derive up/down rates (KiB/s)
# by diffing against the snapshot saved by the previous invocation.
collect_network() {
    result="{"
    first=true
    current_time=$(date +%s)
    # Load the previous snapshot, if one exists.
    prev_data=""
    prev_time=0
    if [ -f "$NETWORK_DATA_FILE" ]; then
        prev_data=$(cat "$NETWORK_DATA_FILE")
        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        [ -z "$prev_time" ] && prev_time=0
    fi
    # Scratch file that will become the next snapshot.
    temp_current_data="/tmp/system_network_current_$(id -u).json"
    echo "{\"time\": $current_time," > "$temp_current_data"
    # Elapsed seconds since the previous snapshot.
    time_diff=1
    if [ $prev_time -ne 0 ]; then
        time_diff=$((current_time - prev_time))
        [ $time_diff -le 0 ] && time_diff=1 # guard against division by zero
    fi
    # Walk every interface except loopback.
    for iface in $(ls /sys/class/net/ | grep -v "lo"); do
        if [ "$first" = true ]; then
            first=false
            echo "\"interfaces\": {" >> "$temp_current_data"
        else
            result+=","
            echo "," >> "$temp_current_data"
        fi
        # Current counters for this interface.
        rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
        tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
        rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
        tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)
        # Persist the current counters into the snapshot file.
        echo "\"$iface\": {\"rx_bytes\": $rx_bytes, \"tx_bytes\": $tx_bytes, \"rx_packets\": $rx_packets, \"tx_packets\": $tx_packets}" >> "$temp_current_data"
        # Derive rates when a previous snapshot is available.
        down_speed=0
        up_speed=0
        if [ -n "$prev_data" ]; then
            # Pull the previous counters for this interface.
            prev_rx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
            prev_tx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')
            # Compute rates only when both previous values were found.
            if [ -n "$prev_rx_bytes" ] && [ -n "$prev_tx_bytes" ]; then
                # Counter deltas since the previous snapshot.
                rx_diff=$((rx_bytes - prev_rx_bytes))
                tx_diff=$((tx_bytes - prev_tx_bytes))
                # Clamp negatives (counters reset when the system reboots).
                [ $rx_diff -lt 0 ] && rx_diff=0
                [ $tx_diff -lt 0 ] && tx_diff=0
                # Rates in KiB per second.
                down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff / 1024}")
                up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff / 1024}")
            fi
        fi
        # Append this interface's entry to the JSON result.
        result+=$(cat << EOF
"$iface": {
"down": $down_speed,
"up": $up_speed,
"downTotal": $rx_bytes,
"upTotal": $tx_bytes,
"downPackets": $rx_packets,
"upPackets": $tx_packets
}
EOF
)
    done
    # Close out the snapshot file.
    if [ "$first" = false ]; then
        echo "}" >> "$temp_current_data"
    else
        echo "\"interfaces\": {}" >> "$temp_current_data"
    fi
    echo "}" >> "$temp_current_data"
    # Publish the snapshot for the next run.
    mv "$temp_current_data" "$NETWORK_DATA_FILE"
    result+="}"
    echo "$result"
}
# Aggregate all non-loopback interfaces into one set of totals and rates.
collect_total_network() {
    current_time=$(date +%s)
    # Initialize accumulators.
    down_total=0
    up_total=0
    down_packets=0
    up_packets=0
    down_speed=0
    up_speed=0
    # Load the previous snapshot, if one exists.
    prev_data=""
    prev_time=0
    if [ -f "$NETWORK_DATA_FILE" ]; then
        prev_data=$(cat "$NETWORK_DATA_FILE")
        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        [ -z "$prev_time" ] && prev_time=0
    fi
    # Elapsed seconds since the previous snapshot.
    time_diff=1
    if [ $prev_time -ne 0 ]; then
        time_diff=$((current_time - prev_time))
        [ $time_diff -le 0 ] && time_diff=1 # guard against division by zero
    fi
    # Sum the current counters across interfaces.
    for iface in $(ls /sys/class/net/ | grep -v "lo"); do
        # Current counters for this interface.
        rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
        tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
        rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
        tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)
        # Accumulate current totals.
        down_total=$((down_total + rx_bytes))
        up_total=$((up_total + tx_bytes))
        down_packets=$((down_packets + rx_packets))
        up_packets=$((up_packets + tx_packets))
    done
    # Sum the previous totals from the snapshot.
    # NOTE(review): prev_down_total / prev_up_total start unset and rely on
    # bash treating unset variables as 0 in arithmetic expansion.
    if [ -f "$NETWORK_DATA_FILE" ]; then
        for iface in $(ls /sys/class/net/ | grep -v "lo"); do
            # Previous counters for this interface.
            iface_prev_rx=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
            iface_prev_tx=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')
            # Accumulate previous totals.
            if [ -n "$iface_prev_rx" ]; then
                prev_down_total=$((prev_down_total + iface_prev_rx))
            fi
            if [ -n "$iface_prev_tx" ]; then
                prev_up_total=$((prev_up_total + iface_prev_tx))
            fi
        done
    fi
    # Overall rates (KiB/s).
    if [ $prev_time -ne 0 ]; then
        rx_diff=$((down_total - prev_down_total))
        tx_diff=$((up_total - prev_up_total))
        # Clamp negatives (counters reset when the system reboots).
        [ $rx_diff -lt 0 ] && rx_diff=0
        [ $tx_diff -lt 0 ] && tx_diff=0
        down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff/ 1024}")
        up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff/ 1024}")
    fi
    # Emit the aggregate JSON object.
    cat << EOF
{
"down": $down_speed,
"up": $up_speed,
"downPackets": $down_packets,
"upPackets": $up_packets,
"downTotal": $down_total,
"upTotal": $up_total
}
EOF
}
# Collect overall and per-core CPU usage by diffing /proc/stat against the
# snapshot saved by the previous invocation; also reports the model name
# and core/socket counts.
collect_cpu() {
    # Snapshot file paths (mktemp for the scratch file, for safety).
    CPU_DATA_FILE="/tmp/system_cpu_data_$(id -u).json"
    TEMP_CURRENT_DATA=$(mktemp "/tmp/system_cpu_current_XXXXXXX.json")
    # Current timestamp.
    local current_time
    current_time=$(date +%s)
    # Aggregate CPU line: busy (user+nice+system), idle, total.
    local current_cpu_stat
    if ! current_cpu_stat=$(cat /proc/stat | grep '^cpu ' | awk '{
        user_nice_system = ($2 + $3 + $4) + 0
        idle = $5 + 0
        total = (user_nice_system + idle + ($6 + 0) + ($7 + 0) + ($8 + 0))
        printf "%d,%d,%d", user_nice_system, idle, total
    }'); then
        echo "Unable to read CPU statistics information" >&2
        return 1
    fi
    local current_user_time=$(echo "$current_cpu_stat" | cut -d',' -f1)
    local current_idle_time=$(echo "$current_cpu_stat" | cut -d',' -f2)
    local current_total_time=$(echo "$current_cpu_stat" | cut -d',' -f3)
    # Per-core counters.
    # NOTE(review): the per-core "busy" figure includes irq/softirq
    # ($6..$8) while the aggregate line above does not -- confirm intended.
    local core_stats=()
    while read -r line; do
        if [[ $line =~ ^cpu[0-9]+ ]]; then
            local core_stat=$(echo "$line" | awk '{printf "%d,%d,%d", $2+$3+$4+$6+$7+$8, $5, $2+$3+$4+$5+$6+$7+$8}')
            core_stats+=("$core_stat")
        fi
    done < /proc/stat
    # Load the previous snapshot, if one exists.
    local prev_data=""
    local prev_time=0
    local prev_user_time=0
    local prev_idle_time=0
    local prev_total_time=0
    local prev_core_stats=()
    if [[ -f "$CPU_DATA_FILE" ]]; then
        if ! prev_data=$(cat "$CPU_DATA_FILE"); then
            echo "Unable to read historical CPU data" >&2
            return 1
        fi
        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        prev_user_time=$(echo "$prev_data" | grep -o '"user_time": [0-9]*' | head -1 | awk '{print $2}')
        prev_idle_time=$(echo "$prev_data" | grep -o '"idle_time": [0-9]*' | head -1 | awk '{print $2}')
        prev_total_time=$(echo "$prev_data" | grep -o '"total_time": [0-9]*' | head -1 | awk '{print $2}')
        # Use awk to extract each core's multi-line JSON fragment.
        local i=0
        while true; do
            local core_data
            core_data=$(echo "$prev_data" | awk -v core="core_$i" '
            $0 ~ "\"" core "\": {" {flag=1; print; next}
            flag && /}/ {print; flag=0; exit}
            flag {print}
            ')
            if [[ -z "$core_data" ]]; then
                break
            fi
            local core_user_time=$(echo "$core_data" | grep -o '"user_time": [0-9]*' | awk '{print $2}')
            local core_idle_time=$(echo "$core_data" | grep -o '"idle_time": [0-9]*' | awk '{print $2}')
            local core_total_time=$(echo "$core_data" | grep -o '"total_time": [0-9]*' | awk '{print $2}')
            prev_core_stats+=("$core_user_time,$core_idle_time,$core_total_time")
            ((i++))
        done
    fi
    # Elapsed seconds since the previous snapshot.
    local time_diff=$((current_time - prev_time))
    ((time_diff <= 0)) && time_diff=1 # guard against division by zero
    # Overall CPU usage percentage.
    local cpu_usage=0
    if ((prev_total_time > 0)); then
        local user_diff=$((current_user_time - prev_user_time))
        local total_diff=$((current_total_time - prev_total_time))
        # Clamp negatives (counters reset when the system reboots).
        ((user_diff < 0)) && user_diff=0
        ((total_diff < 0)) && total_diff=0
        if ((total_diff > 0)); then
            cpu_usage=$(awk "BEGIN {printf \"%.2f\", ($user_diff / $total_diff) * 100}")
        fi
    fi
    # Logical core count.
    local logical_cores
    logical_cores=$(nproc 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null || echo 1)
    # Per-core usage percentages.
    local cpu_cores_usage="["
    local first=true
    local i=0
    for core_stat in "${core_stats[@]}"; do
        local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
        local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
        local core_total_time=$(echo "$core_stat" | cut -d',' -f3)
        local core_usage=0
        if ((i < ${#prev_core_stats[@]})); then
            local prev_core_stat=${prev_core_stats[$i]}
            local prev_core_user_time=$(echo "$prev_core_stat" | cut -d',' -f1)
            local prev_core_idle_time=$(echo "$prev_core_stat" | cut -d',' -f2)
            local prev_core_total_time=$(echo "$prev_core_stat" | cut -d',' -f3)
            local core_user_diff=$((core_user_time - prev_core_user_time))
            local core_total_diff=$((core_total_time - prev_core_total_time))
            # Clamp negatives.
            ((core_user_diff < 0)) && core_user_diff=0
            ((core_total_diff < 0)) && core_total_diff=0
            if ((core_total_diff > 0)); then
                core_usage=$(awk "BEGIN {printf \"%.2f\", ($core_user_diff / $core_total_diff) * 100}")
            fi
        fi
        if [[ "$first" == true ]]; then
            first=false
        else
            cpu_cores_usage+=","
        fi
        cpu_cores_usage+="$core_usage"
        ((i++))
    done
    cpu_cores_usage+="]"
    # CPU model name (prefer lscpu).
    local cpu_name
    if command -v lscpu >/dev/null 2>&1; then
        cpu_name=$(lscpu | grep "Model name" | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
    else
        cpu_name=$(grep "model name" /proc/cpuinfo | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
    fi
    # Physical core / socket counts (prefer lscpu).
    local physical_cores=1
    local physical_cpus=1
    if command -v lscpu >/dev/null 2>&1; then
        physical_cores=$(lscpu | grep "Core(s) per socket" | awk '{print $4}')
        physical_cpus=$(lscpu | grep "Socket(s)" | awk '{print $2}')
    else
        # Fallback: parse /proc/cpuinfo.
        physical_cpus=$(grep "physical id" /proc/cpuinfo | sort -u | wc -l)
        physical_cores=$(grep "cpu cores" /proc/cpuinfo | head -1 | awk '{print $4}')
        # When the core count is unavailable, derive a conservative estimate.
        if [[ -z "$physical_cores" ]]; then
            physical_cores=$(( logical_cores / physical_cpus ))
        fi
    fi
    # Make sure both values are set.
    [[ -z "$physical_cores" ]] && physical_cores=1
    [[ -z "$physical_cpus" ]] && physical_cpus=1
    # Save the current counters as the snapshot for the next comparison.
    {
        echo "{"
        echo " \"time\": $current_time,"
        echo " \"user_time\": $current_user_time,"
        echo " \"idle_time\": $current_idle_time,"
        echo " \"total_time\": $current_total_time,"
        # Per-core counters.
        local i=0
        for core_stat in "${core_stats[@]}"; do
            local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
            local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
            local core_total_time=$(echo "$core_stat" | cut -d',' -f3)
            echo " \"core_$i\": {"
            echo " \"user_time\": $core_user_time,"
            echo " \"idle_time\": $core_idle_time,"
            echo " \"total_time\": $core_total_time"
            if ((i < ${#core_stats[@]} - 1)); then
                echo " },"
            else
                echo " }"
            fi
            ((i++))
        done
        echo "}"
    } > "$TEMP_CURRENT_DATA"
    # Atomically replace the snapshot file.
    if ! mv "$TEMP_CURRENT_DATA" "$CPU_DATA_FILE"; then
        echo "Unable to save CPU data to $CPU_DATA_FILE" >&2
        rm -f "$TEMP_CURRENT_DATA"
        return 1
    fi
    # Emit the formatted result array.
    echo "[$cpu_usage, $logical_cores, $cpu_cores_usage, \"$cpu_name\", $physical_cores, $physical_cpus]"
}
# Emit the aggregate CPU time counters from /proc/stat together with the
# total and currently-running process counts, as one JSON object.
collect_cpu_times() {
    # Fields 2..11 of the aggregate "cpu" line.
    cpu_line=$(grep '^cpu ' /proc/stat | awk '{print $2,$3,$4,$5,$6,$7,$8,$9,$10,$11}')
    read -r user nice system idle iowait irq softirq steal guest guest_nice <<< "$cpu_line"
    # Process counts: runnable processes, then all processes.
    # NOTE(review): `ps -e | wc -l` also counts the ps header row -- confirm.
    active_processes=$(ps -eo stat | grep -c "R")
    total_processes=$(ps -e | wc -l)
    cat << EOF
{
"user": $user,
"nice": $nice,
"system": $system,
"idle": $idle,
"iowait": $iowait,
"irq": $irq,
"softirq": $softirq,
"steal": $steal,
"guest": $guest,
"guest_nice": $guest_nice,
"Total number of processes": $total_processes,
"Number of activity processes": $active_processes
}
EOF
}
# Report usage for every real filesystem (tmpfs/devtmpfs, /boot and docker
# overlay2 mounts excluded): byte counts, human-readable sizes and inodes.
collect_disk() {
    df_output=$(df -TPB1 -x tmpfs -x devtmpfs | tail -n +2 | grep -vE "/boot\$" | grep -vE "docker/overlay2")
    result="["
    first=true
    while read -r filesystem type total used avail pcent mountpoint; do
        if [ "$first" = true ]; then
            first=false
        else
            result+=","
        fi
        size_bytes=$total
        size_used=$used
        size_avail=$avail
        # Humanize each byte value with its own awk pass.
        size_human=$(echo "$size_bytes" | awk '{
        suffix="BKMGT"; value=$1;
        for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
        printf("%.2f%s", value, substr(suffix,i,1));
        }')
        size_used_human=$(echo "$size_used" | awk '{
        suffix="BKMGT"; value=$1;
        for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
        printf("%.2f%s", value, substr(suffix,i,1));
        }')
        size_avail_human=$(echo "$size_avail" | awk '{
        suffix="BKMGT"; value=$1;
        for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
        printf("%.2f%s", value, substr(suffix,i,1));
        }')
        # Inode statistics for this mountpoint.
        # NOTE(review): $mountpoint is interpolated into an ERE -- paths
        # containing regex metacharacters may mis-match; confirm acceptable.
        inode_info=$(df -i | grep -E "$mountpoint\$" | awk '{print $2,$3,$4,$5}')
        read -r itotal iused iavail ipcent <<< "$inode_info"
        # Default the inode values when the lookup produced nothing.
        [ -z "$itotal" ] && itotal=0
        [ -z "$iused" ] && iused=0
        [ -z "$iavail" ] && iavail=0
        [ -z "$ipcent" ] && ipcent="0%"
        result+=$(cat << EOF
{
"filesystem": "$filesystem",
"types": "$type",
"path": "$mountpoint",
"rname": "$(basename "$mountpoint")",
"byte_size": [$size_bytes, $size_used, $size_avail],
"size": ["$size_human", "$size_used_human", "$size_avail_human"],
"d_size": "$pcent",
"inodes": [$itotal, $iused, $iavail, "$ipcent"]
}
EOF
)
    done <<< "$df_output"
    result+="]"
    echo "$result"
}
# Collect per-disk I/O counters from /sys/block/*/stat and emit them as a
# JSON object keyed by device name.
collect_iostat() {
result="{"
first=true
# Only whole disks (sd*, hd*, vd*, nvme*) are listed; partitions under
# /sys/block are not matched by the prefix filter.
disks=$(ls /sys/block/ 2>/dev/null | grep -E '^(sd|hd|vd|nvme)' 2>/dev/null || echo "")
for disk in $disks; do
if [ -r "/sys/block/$disk/stat" ]; then
# Comma separator before every entry except the first.
if [ "$first" = true ]; then
first=false
else
result+=","
fi
# Read the kernel's raw block-device statistics line.
disk_stats=$(cat /sys/block/$disk/stat 2>/dev/null)
if [ -n "$disk_stats" ]; then
# Default every field to 0 in case the read yields fewer columns.
read_comp=0 read_merged=0 read_sectors=0 read_ms=0 write_comp=0 write_merged=0 write_sectors=0 write_ms=0 io_in_progress=0 io_ms_weighted=0
# Parse the stat columns into the named fields.
read read_comp read_merged read_sectors read_ms write_comp write_merged write_sectors write_ms io_in_progress io_ms_weighted <<< "$disk_stats"
# Convert sectors to bytes (the stat file reports 512-byte sectors).
read_bytes=$((read_sectors * 512))
write_bytes=$((write_sectors * 512))
result+=$(cat << EOF
"$disk": {
"read_count": $read_comp,
"read_merged_count": $read_merged,
"read_bytes": $read_bytes,
"read_time": $read_ms,
"write_count": $write_comp,
"write_merged_count": $write_merged,
"write_bytes": $write_bytes,
"write_time": $write_ms
}
EOF
)
fi
fi
done
result+="}"
echo "$result"
}
# Collect load averages and derived capacity thresholds, emitted as JSON.
collect_load() {
load_avg=$(cat /proc/loadavg)
# First three fields are the 1/5/15-minute averages; "others" soaks up
# the remaining runnable/total and last-pid fields, which are unused.
read -r one five fifteen others <<< "$load_avg"
cpu_count=$(nproc)
# Twice the logical CPU count is treated as the maximum tolerable load.
max_load=$((cpu_count * 2))
# "Safe" threshold: 70% of the maximum (awk handles the fractional math).
safe_load=$(awk "BEGIN {printf \"%.2f\", $max_load * 0.7}")
cat << EOF
{
"one": $one,
"five": $five,
"fifteen": $fifteen,
"max": $max_load,
"limit": $cpu_count,
"safe": $safe_load
}
EOF
}
# Collect memory usage from /proc/meminfo and emit it as a JSON object.
# Byte fields are exact; memNewTotal/memNewRealUsed are human-readable.
collect_mem() {
mem_info=$(cat /proc/meminfo)
# Extract memory figures (unit: kB). Patterns are anchored with ':' so
# e.g. "Shmem:" cannot also match "ShmemHugePages:".
mem_total=$(awk '/^MemTotal:/ {print $2; exit}' <<< "$mem_info" || echo 0)
mem_free=$(awk '/^MemFree:/ {print $2; exit}' <<< "$mem_info" || echo 0)
mem_available=$(awk '/^MemAvailable:/ {print $2; exit}' <<< "$mem_info" || echo "$mem_free")
mem_buffers=$(awk '/^Buffers:/ {print $2; exit}' <<< "$mem_info" || echo 0)
mem_cached=$(awk '/^Cached:/ {print $2; exit}' <<< "$mem_info" || echo 0)
mem_sreclaimable=$(awk '/^SReclaimable:/ {print $2; exit}' <<< "$mem_info" || echo 0)
mem_shared=$(awk '/^Shmem:/ {print $2; exit}' <<< "$mem_info" || echo 0)
# Guard against empty values so the arithmetic below cannot fail.
[ -z "$mem_total" ] && mem_total=0
[ -z "$mem_free" ] && mem_free=0
[ -z "$mem_available" ] && mem_available=0
[ -z "$mem_buffers" ] && mem_buffers=0
[ -z "$mem_cached" ] && mem_cached=0
[ -z "$mem_shared" ] && mem_shared=0
[ -z "$mem_sreclaimable" ] && mem_sreclaimable=0
# Really-used memory, free(1)-style: total minus free, buffers, page
# cache and reclaimable slab.
# BUG FIX: the previous version duplicated the Buffers extraction and
# subtracted mem_buffers twice here (and added it twice into the cached
# total below), double-counting buffers in both figures.
mem_real_used=$((mem_total - mem_free - mem_buffers - mem_cached - mem_sreclaimable))
[ $mem_real_used -lt 0 ] && mem_real_used=0
# Human-readable totals (one value per awk invocation).
mem_new_total=$(awk -v bytes="$((mem_total * 1024))" 'BEGIN {
suffix="BKMGT"; value=bytes;
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
printf("%.2f%s", value, substr(suffix,i,1));
}')
mem_new_real_used=$(awk -v bytes="$((mem_real_used * 1024))" 'BEGIN {
suffix="BKMGT"; value=bytes;
for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
printf("%.2f%s", value, substr(suffix,i,1));
}')
# Convert kB to bytes for the JSON payload. Buffers are reported in
# their own field, so they are counted once: memCached covers the page
# cache plus reclaimable slab only.
mem_total_bytes=$((mem_total * 1024))
mem_free_bytes=$((mem_free * 1024))
mem_available_bytes=$((mem_available * 1024))
mem_buffers_bytes=$((mem_buffers * 1024))
mem_cached_bytes=$(((mem_cached + mem_sreclaimable) * 1024))
mem_real_used_bytes=$((mem_real_used * 1024))
mem_shared_bytes=$((mem_shared * 1024))
cat << EOF
{
"memTotal": $mem_total_bytes,
"memFree": $mem_free_bytes,
"memAvailable": $mem_available_bytes,
"memBuffers": $mem_buffers_bytes,
"memCached": $mem_cached_bytes,
"memRealUsed": $mem_real_used_bytes,
"memShared": $mem_shared_bytes,
"memNewTotal": "$mem_new_total",
"memNewRealUsed": "$mem_new_real_used"
}
EOF
}
# Determine physical RAM size in bytes via dmidecode, printing "0" and
# returning 1 when it cannot be determined. Tries the DMI array's
# "Maximum Capacity" first, then sums the installed module sizes.
collect_physical_memory() {
# Use sudo when available; dmidecode needs root to read the DMI tables.
if command -v sudo >/dev/null 2>&1; then
SUDO_CMD="sudo"
else
SUDO_CMD=""
fi
# Install dmidecode on the fly if it is missing, trying each common
# package manager in turn (all output is discarded).
if ! command -v dmidecode >/dev/null 2>&1; then
if command -v apt-get >/dev/null 2>&1; then
$SUDO_CMD apt-get update >/dev/null 2>&1 && $SUDO_CMD apt-get install -y dmidecode >/dev/null 2>&1
elif command -v yum >/dev/null 2>&1; then
$SUDO_CMD yum install -y dmidecode >/dev/null 2>&1
elif command -v dnf >/dev/null 2>&1; then
$SUDO_CMD dnf install -y dmidecode >/dev/null 2>&1
elif command -v zypper >/dev/null 2>&1; then
$SUDO_CMD zypper install -y dmidecode >/dev/null 2>&1
elif command -v pacman >/dev/null 2>&1; then
$SUDO_CMD pacman -S --noconfirm dmidecode >/dev/null 2>&1
fi
fi
# Re-check dmidecode after the installation attempt.
if command -v dmidecode >/dev/null 2>&1; then
# First try the array's "Maximum Capacity" field, converted to bytes.
max_capacity=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Maximum Capacity:" | head -n1 | awk '
{
value = $3
unit = $4
# 转换为字节
if (unit == "GB" || unit == "gb") {
bytes = value * 1024 * 1024 * 1024
} else if (unit == "MB" || unit == "mb") {
bytes = value * 1024 * 1024
} else if (unit == "TB" || unit == "tb") {
bytes = value * 1024 * 1024 * 1024 * 1024
} else {
bytes = 0
}
printf "%.0f", bytes
}
')
if [ -n "$max_capacity" ] && [ "$max_capacity" -gt 0 ] 2>/dev/null; then
echo "$max_capacity"
return 0
fi
# Maximum Capacity was unavailable: sum the sizes of the installed
# memory modules instead.
total_memory=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Size:" | grep -i "[0-9]* GB\|[0-9]* MB" | awk '
BEGIN { total = 0 }
{
value = $2
unit = $3
# 转换为字节
if (unit == "GB" || unit == "gb") {
bytes = value * 1024 * 1024 * 1024
} else if (unit == "MB" || unit == "mb") {
bytes = value * 1024 * 1024
}
total += bytes
}
END {
printf "%.0f", total
}
')
if [ -n "$total_memory" ] && [ "$total_memory" -gt 0 ] 2>/dev/null; then
echo "$total_memory"
return 0
fi
fi
# Every strategy failed: report 0 bytes and a non-zero exit status.
echo "0"
return 1
}
# 主函数收集所有信息并生成JSON
main() {
# 收集系统信息
os_name=$(cat /etc/os-release 2>/dev/null | grep "PRETTY_NAME" | cut -d "=" -f 2 | tr -d '"' || echo "Unknown")
simple_system=$(awk -F= '
/^ID=/ {id=$2}
/^VERSION_ID=/ {gsub(/"/,"",$2); version=$2}
END {
gsub(/"/,"",id);
print toupper(substr(id,1,1)) substr(id,2) " " version
}' /etc/os-release 2>/dev/null || echo "Unknown")
hostname=$(hostname)
current_time=$(date "+%Y-%m-%d %H:%M:%S")
version="1.0.0" # 自定义版本
# 假设的站点和数据库计数 (实际需要根据具体环境采集)
site_total=0
database_total=0
ftp_total=0
installed=true
# 收集网络总统计
network_stats=$(collect_total_network)
down=$(echo "$network_stats" | grep -o '"down": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
up=$(echo "$network_stats" | grep -o '"up": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
down_packets=$(echo "$network_stats" | grep -o '"downPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
up_packets=$(echo "$network_stats" | grep -o '"upPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
down_total=$(echo "$network_stats" | grep -o '"downTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
up_total=$(echo "$network_stats" | grep -o '"upTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
physical_memory=$(collect_physical_memory)
# 生成最终JSON
cat << EOF
{
"cpu": $(collect_cpu),
"cpu_times": $(collect_cpu_times),
"disk": $(collect_disk),
"iostat": $(collect_iostat),
"load": $(collect_load),
"mem": $(collect_mem),
"network": $(collect_network),
"system": "$os_name",
"simple_system": "$simple_system",
"title": "$hostname",
"time": "$current_time",
"version": "$version",
"site_total": $site_total,
"database_total": $database_total,
"ftp_total": $ftp_total,
"installed": $installed,
"down": $down,
"up": $up,
"downPackets": $down_packets,
"upPackets": $up_packets,
"downTotal": $down_total,
"upTotal": $up_total,
"physical_memory": $physical_memory
}
EOF
}
# Entry point: run the collector and emit the full JSON document on stdout.
main

View File

@@ -0,0 +1,199 @@
import json
import os.path
import traceback
from typing import Optional, Tuple, Callable, Union, Dict
from mod.base.ssh_executor import SSHExecutor, CommandResult
from mod.project.node.dbutil import ServerNodeDB, Node
import public
def is_much_difference(a: float, b: float) -> bool:
    """Return True when *a* and *b* differ by a factor of 10 or more.

    A zero on either side is treated as "much different" so callers can
    safely fall back to the other measurement.
    """
    if a == 0 or b == 0:
        return True
    return not (0.1 < a / b < 10)
class SSHApi:
    """Wrapper around SSHExecutor for running panel operations on a remote node."""

    # Distinguishes remote nodes from a local-machine implementation elsewhere.
    is_local = False
    # Directory holding the helper shell scripts executed over SSH.
    # NOTE(review): spelled "ssh_warp_scripts" on disk — keep as-is.
    _local_scripts_dir = os.path.join(os.path.dirname(__file__), "ssh_warp_scripts")
def __init__(self, host, port: int = 22, username: str = "root", password=None, pkey=None,
             pkey_passwd=None, threading_mod=False, timeout=20):
    """Store the SSH connection settings; no connection is opened yet."""
    self._real_ssh_conf = dict(
        host=host,
        username=username,
        port=port,
        password=password,
        key_file="",
        passphrase=pkey_passwd,
        key_data=pkey,
        strict_host_key_checking=False,
        allow_agent=False,
        look_for_keys=False,
        threading_mod=threading_mod,
        timeout=timeout,
    )
    # Lazily created by _get_ssh_executor() on first use.
    self._ssh_executor = None
@classmethod
def new_by_id(cls, node_id: int, threading_mod=False) -> Optional["SSHApi"]:
    """Build an SSHApi from a node record stored in ServerNodeDB.

    Args:
        node_id: primary key of the node.
        threading_mod: forwarded into the SSH executor configuration.

    Returns:
        A configured SSHApi, or None when the node is missing or its
        stored ssh_conf is empty or not valid JSON.
    """
    data = ServerNodeDB().get_node_by_id(node_id)
    if not data or not isinstance(data, dict):
        return None
    try:
        ssh_conf = json.loads(data["ssh_conf"])
    except (KeyError, TypeError, ValueError):
        # A corrupt or missing configuration is treated like a missing node
        # instead of propagating a decode error to the caller.
        return None
    if not ssh_conf:
        return None
    ssh_conf["threading_mod"] = threading_mod
    return cls(**ssh_conf)
def _get_ssh_executor(self) -> SSHExecutor:
    """Return the cached SSHExecutor, creating it on first access."""
    executor = self._ssh_executor
    if not executor:
        executor = SSHExecutor(**self._real_ssh_conf)
        self._ssh_executor = executor
    return executor
def get_net_work(self) -> Tuple[Optional[dict], str]:
    """Run system_info.sh on the remote host and return its parsed output.

    Returns:
        (data, "") on success, or (None, error_message) on any failure.
    """
    result, err = self._run_script("system_info.sh")
    if err:
        return None, err
    if result.exit_code != 0:
        return None, result.stderr
    try:
        parsed = json.loads(result.stdout)
        # A sane payload is a dict carrying at least the cpu and mem keys.
        if isinstance(parsed, dict) and "cpu" in parsed and "mem" in parsed:
            return self._tans_net_work_form_data(parsed), ""
        return None, "data in wrong format: %s" % str(parsed)
    except Exception as e:
        return None, str(e)
@staticmethod
def _tans_net_work_form_data(data: dict):
    """Convert raw byte counters to MB and derive a human-readable total.

    Mutates *data* in place and returns it.
    """
    mem = data["mem"]
    for key in ("memAvailable", "memBuffers", "memCached", "memFree",
                "memRealUsed", "memShared", "memTotal"):
        mem[key] = round(mem[key] / 1024 / 1024, 2)
    data["physical_memory"] = round(data["physical_memory"] / 1024 / 1024, 2)
    # Prefer the kernel-reported total when it disagrees wildly with the
    # DMI physical memory size; otherwise trust the DMI figure.
    if is_much_difference(mem["memTotal"], data["physical_memory"]):
        total_mb = mem["memTotal"]
    else:
        total_mb = data["physical_memory"]
    if total_mb >= 1024:
        mem["memNewTotal"] = "%.2fGB" % (total_mb / 1024)
    else:
        mem["memNewTotal"] = "%.2fMB" % total_mb
    return data
def _run_script(self, script_name: str) -> Tuple[Optional[CommandResult], str]:
    """Execute a bundled helper script on the remote host over SSH.

    Args:
        script_name: file name inside the local ssh_warp_scripts directory.

    Returns:
        (CommandResult, "") on success, or (None, error_message) on
        failure. The SSH connection is always closed before returning.
    """
    script_path = os.path.join(self._local_scripts_dir, script_name)
    if not os.path.exists(script_path):
        return None, "Script does not exist"
    executor = None
    try:
        executor = self._get_ssh_executor()
        executor.open()
        return executor.execute_local_script_collect(script_path), ""
    except RuntimeError:
        return None, "SSH connection failed"
    except Exception as e:
        return None, str(e)
    finally:
        if executor:
            executor.close()
def target_file_exits(self, target_file: str) -> Tuple[bool, str]:
    """Check whether *target_file* exists on the remote host.

    Returns:
        (exists, error_message); error_message is "" on success.
    """
    try:
        executor = self._get_ssh_executor()
        executor.open()
        exists, err = executor.path_exists(target_file)
        return exists, err
    except RuntimeError:
        print(traceback.format_exc(), flush=True)
        return False, "SSH connection failed"
    except Exception as e:
        print(traceback.format_exc(), flush=True)
        return False, str(e)
def create_dir(self, path: str) -> Tuple[bool, str]:
    """Create *path* on the remote host via the SSH executor.

    Returns:
        (ok, error_message); error_message is "" on success.
    """
    try:
        executor = self._get_ssh_executor()
        executor.open()
        result, err = executor.create_dir(path)
        return result, err
    except RuntimeError:
        # Consistency fix: match the other wrappers, which flush their
        # traceback output so it is not lost in a panel subprocess.
        print(traceback.format_exc(), flush=True)
        return False, "SSH connection failed"
    except Exception as e:
        # Consistency fix: previously the generic handler swallowed the
        # traceback entirely; log it like the sibling methods do.
        print(traceback.format_exc(), flush=True)
        return False, str(e)
def upload_file(self, filename: str, target_path: str, mode: str = "cover",
                call_log: Callable[[int, str], None] = None) -> str:
    """Upload a local file to *target_path* on the remote host.

    Args:
        filename: local file to upload.
        target_path: remote directory to place the file in.
        mode: "cover" to overwrite, "ignore" to skip when the target
            exists, "rename" to upload under a hash-suffixed name.
        call_log: optional progress callback taking (percent, message).

    Returns:
        "" on success, otherwise an error message.
    """
    # BUG FIX: call_log defaults to None but was invoked unconditionally,
    # raising TypeError for any caller that did not pass a callback.
    if call_log is None:
        call_log = lambda _percent, _msg: None
    if not os.path.isfile(filename):
        return "File: {} does not exist".format(filename)
    target_file = os.path.join(target_path, os.path.basename(filename))
    path_info = self.path_info(target_file)
    # path_info returns an error string on failure, a dict on success.
    if isinstance(path_info, str):
        return path_info
    if path_info['exists'] and mode == "ignore":
        call_log(0, "File upload:{} -> {},The target file already exists, skip uploading".format(filename, target_file))
        return ""
    if path_info['exists'] and mode == "rename":
        upload_name = "{}_{}".format(os.path.basename(filename), public.md5(filename))
        call_log(0, "File upload:{} -> {},The target file already exists, it will be renamed to {}".format(filename, target_file, upload_name))
    else:
        upload_name = os.path.basename(filename)
    try:
        executor = self._get_ssh_executor()
        executor.open()

        def progress_callback(current_size: int, total_size: int):
            # Guard against division by zero for empty files.
            if total_size == 0:
                return
            call_log(current_size * 100 // total_size, "")
        executor.upload(filename, os.path.join(target_path, upload_name), progress_callback=progress_callback)
    except RuntimeError:
        print(traceback.format_exc(), flush=True)
        return "SSH connection failed"
    except Exception as e:
        print(traceback.format_exc(), flush=True)
        return str(e)
    return ""
def upload_dir_check(self, target_file: str) -> str:
    """Verify that *target_file* is usable as an upload directory.

    Returns:
        "" when the path is absent (it can be created) or is an existing
        directory; otherwise an error message.
    """
    try:
        executor = self._get_ssh_executor()
        executor.open()
        path_info = executor.path_info(target_file)
        if not path_info['exists']:
            return ""
        # BUG FIX: the condition was inverted — the error fired when the
        # path WAS a directory. It must fire when an existing path is not
        # a directory, which is exactly what the message describes.
        if not path_info['is_dir']:
            return "The name path is not a directory"
        return ""
    except RuntimeError:
        print(traceback.format_exc(), flush=True)
        return "SSH connection failed"
    except Exception as e:
        print(traceback.format_exc(), flush=True)
        return str(e)
def path_info(self, path: str) -> Union[str, Dict]:
    """Fetch metadata about *path* on the remote host.

    Returns:
        A dict of path information on success, or an error string on
        failure (callers distinguish the two with isinstance checks).
    """
    try:
        executor = self._get_ssh_executor()
        executor.open()
        return executor.path_info(path)
    except RuntimeError as e:
        print(traceback.format_exc(), flush=True)
        return "SSH connection failed: {}".format(str(e))
    except Exception as e:
        print(traceback.format_exc(), flush=True)
        return "Failed to obtain path information:{}".format(str(e))